
[RISCV] Avoid using x0,x0 vsetvli for vmv.x.s and vfmv.f.s unless we know the sew/lmul ratio is constant.

Since we're changing VTYPE, we may change VLMAX, which could
invalidate the previous VL. If we can't tell whether it is safe,
we should use an AVL of 1 instead of keeping the old VL.

This is a quick fix. We may want to thread VL to the pseudo
instruction instead of making up a value. That will require ISD
opcode changes and changes to the C intrinsic interface.

This fixes the issue raised in D106286.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D106403
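
For readers unfamiliar with the constraint: the `vsetvli x0, x0, <vtype>` form keeps the current VL while only changing VTYPE, which is sound only when the new vtype leaves VLMAX unchanged, i.e. when the SEW/LMUL ratio stays the same. A minimal sketch of that ratio check follows; `sameSEWLMULRatio` is a hypothetical helper invented for illustration (in the diff below, `hasSameVLMAX` plays this role):

#include <cstdint>

// Hypothetical helper (assumption, not part of the patch): two vtype settings
// yield the same VLMAX for every VLEN exactly when their SEW/LMUL ratios match.
// LMUL is passed in eighths (mf8 = 1, m1 = 8, m8 = 64) so the test stays integral.
static bool sameSEWLMULRatio(uint64_t SEWA, uint64_t LMULEighthsA,
                             uint64_t SEWB, uint64_t LMULEighthsB) {
  // VLMAX = VLEN * (LMUL/8) / SEW; cross-multiply instead of dividing.
  return SEWA * LMULEighthsB == SEWB * LMULEighthsA;
}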
Craig Topper 2021-07-23 09:05:23 -07:00
parent da55d61e8f
commit a4ed0c97a6
16 changed files with 192 additions and 181 deletions

View File

@ -431,11 +431,22 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB, MachineInstr &MI,
   Register AVLReg = Info.getAVLReg();
   if (AVLReg == RISCV::NoRegister) {
-    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETVLI))
+    // We can only use x0, x0 if there's no chance of the vtype change causing
+    // the previous vl to become invalid.
+    if (PrevInfo.isValid() && !PrevInfo.isUnknown() &&
+        Info.hasSameVLMAX(PrevInfo)) {
+      BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETVLI))
+          .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+          .addReg(RISCV::X0, RegState::Kill)
+          .addImm(Info.encodeVTYPE())
+          .addReg(RISCV::VL, RegState::Implicit);
+      return;
+    }
+    // Otherwise use an AVL of 0 to avoid depending on previous vl.
+    BuildMI(MBB, MI, DL, TII->get(RISCV::PseudoVSETIVLI))
         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
-        .addReg(RISCV::X0, RegState::Kill)
-        .addImm(Info.encodeVTYPE())
-        .addReg(RISCV::VL, RegState::Implicit);
+        .addImm(0)
+        .addImm(Info.encodeVTYPE());
     return;
   }
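
The effect on the emitted scalar-extract sequences is what the test diffs below check. As a hedged illustration (the vtype and registers are taken from the first test, not chosen by the pass sketch here), the pass now requests an immediate AVL when the incoming SEW/LMUL ratio is unknown or different:

  # Before: vtype changes while the stale VL is kept; VLMAX may have shrunk.
  vsetvli zero, zero, e16, mf4, ta, mu
  vfmv.f.s fa0, v8
  # After: an explicit immediate AVL is used instead. vmv.x.s and vfmv.f.s read
  # element 0 regardless of vl, so the tiny AVL is sufficient and the result no
  # longer depends on the previous VL.
  vsetivli zero, 0, e16, mf4, ta, mu
  vfmv.f.s fa0, v8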

View File

@ -5,7 +5,7 @@
define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x half> %v, i32 0
@ -37,7 +37,7 @@ define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 %idx) {
define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
; CHECK-LABEL: extractelt_nxv2f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x half> %v, i32 0
@ -69,7 +69,7 @@ define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 %idx) {
define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
; CHECK-LABEL: extractelt_nxv4f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x half> %v, i32 0
@ -101,7 +101,7 @@ define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 %idx) {
define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x half> %v, i32 0
@ -133,7 +133,7 @@ define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 %idx) {
define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x half> %v, i32 0
@ -165,7 +165,7 @@ define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 %idx) {
define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x half> %v, i32 0
@ -197,7 +197,7 @@ define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 %idx) {
define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x float> %v, i32 0
@ -229,7 +229,7 @@ define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 %idx) {
define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x float> %v, i32 0
@ -261,7 +261,7 @@ define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 %idx) {
define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x float> %v, i32 0
@ -293,7 +293,7 @@ define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 %idx) {
define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x float> %v, i32 0
@ -325,7 +325,7 @@ define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 %idx) {
define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x float> %v, i32 0
@ -357,7 +357,7 @@ define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 %idx) {
define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x double> %v, i32 0
@ -389,7 +389,7 @@ define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 %idx) {
define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x double> %v, i32 0
@ -421,7 +421,7 @@ define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 %idx) {
define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x double> %v, i32 0
@ -453,7 +453,7 @@ define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 %idx) {
define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x double> %v, i32 0

View File

@ -5,7 +5,7 @@
define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x half> %v, i32 0
@ -37,7 +37,7 @@ define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 signext %idx) {
define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
; CHECK-LABEL: extractelt_nxv2f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x half> %v, i32 0
@ -69,7 +69,7 @@ define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 signext %idx) {
define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
; CHECK-LABEL: extractelt_nxv4f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x half> %v, i32 0
@ -101,7 +101,7 @@ define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 signext %idx) {
define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x half> %v, i32 0
@ -133,7 +133,7 @@ define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 signext %idx) {
define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x half> %v, i32 0
@ -165,7 +165,7 @@ define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 signext %idx)
define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x half> %v, i32 0
@ -197,7 +197,7 @@ define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 signext %idx)
define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x float> %v, i32 0
@ -229,7 +229,7 @@ define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 signext %idx)
define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x float> %v, i32 0
@ -261,7 +261,7 @@ define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 signext %idx)
define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x float> %v, i32 0
@ -293,7 +293,7 @@ define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 signext %idx)
define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x float> %v, i32 0
@ -325,7 +325,7 @@ define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 signext %idx)
define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x float> %v, i32 0
@ -357,7 +357,7 @@ define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 signext %idx
define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x double> %v, i32 0
@ -389,7 +389,7 @@ define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 signext %idx
define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x double> %v, i32 0
@ -421,7 +421,7 @@ define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 signext %idx
define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x double> %v, i32 0
@ -453,7 +453,7 @@ define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 signext %idx
define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x double> %v, i32 0

View File

@ -5,7 +5,7 @@
define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
@ -37,7 +37,7 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
@ -69,7 +69,7 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
@ -101,7 +101,7 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 0
@ -133,7 +133,7 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
@ -165,7 +165,7 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
@ -197,7 +197,7 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
@ -229,7 +229,7 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
@ -261,7 +261,7 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
@ -293,7 +293,7 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 0
@ -325,7 +325,7 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
@ -357,7 +357,7 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
@ -389,7 +389,7 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
@ -421,7 +421,7 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
define i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 0
@ -453,7 +453,7 @@ define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i32> %v, i32 0
@ -485,7 +485,7 @@ define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 0
@ -517,7 +517,7 @@ define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 0
@ -549,7 +549,7 @@ define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
define i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 0

View File

@ -5,7 +5,7 @@
define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
@ -37,7 +37,7 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 signext %idx)
define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
@ -69,7 +69,7 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 signext %idx)
define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
@ -101,7 +101,7 @@ define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 signext %idx)
define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i8> %v, i32 0
@ -133,7 +133,7 @@ define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 signext %idx)
define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i8> %v, i32 0
@ -165,7 +165,7 @@ define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 signext %id
define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i8> %v, i32 0
@ -197,7 +197,7 @@ define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 signext %id
define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 64 x i8> %v, i32 0
@ -229,7 +229,7 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 signext %id
define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
@ -261,7 +261,7 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 signext %i
define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
@ -293,7 +293,7 @@ define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 signext %i
define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i16> %v, i32 0
@ -325,7 +325,7 @@ define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 signext %i
define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i16> %v, i32 0
@ -357,7 +357,7 @@ define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 signext %i
define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i16> %v, i32 0
@ -389,7 +389,7 @@ define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 signext
define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 32 x i16> %v, i32 0
@ -421,7 +421,7 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 signext
define signext i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 0
@ -453,7 +453,7 @@ define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %i
define signext i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i32> %v, i32 0
@ -485,7 +485,7 @@ define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %i
define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i32> %v, i32 0
@ -517,7 +517,7 @@ define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %i
define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i32> %v, i32 0
@ -549,7 +549,7 @@ define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %i
define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 16 x i32> %v, i32 0
@ -581,7 +581,7 @@ define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext
define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i64> %v, i32 0
@ -613,7 +613,7 @@ define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 signext %idx) {
define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i64> %v, i32 0
@ -645,7 +645,7 @@ define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 signext %idx) {
define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i64> %v, i32 0
@ -677,7 +677,7 @@ define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 signext %idx) {
define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 8 x i64> %v, i32 0

View File

@ -17,7 +17,7 @@ define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
; CHECK-LABEL: bitcast_v1i8_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x i8> %a to i8
@ -27,7 +27,7 @@ define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x i8> %a to i16
@ -37,7 +37,7 @@ define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x i16> %a to i16
@ -47,7 +47,7 @@ define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <4 x i8> %a to i32
@ -57,7 +57,7 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x i16> %a to i32
@ -67,7 +67,7 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x i32> %a to i32
@ -86,7 +86,7 @@ define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
;
; RV64-LABEL: bitcast_v8i8_i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <8 x i8> %a to i64
@ -105,7 +105,7 @@ define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
;
; RV64-LABEL: bitcast_v4i16_i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <4 x i16> %a to i64
@ -124,7 +124,7 @@ define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
;
; RV64-LABEL: bitcast_v2i32_i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <2 x i32> %a to i64
@ -143,7 +143,7 @@ define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
;
; RV64-LABEL: bitcast_v1i64_i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <1 x i64> %a to i64
@ -153,7 +153,7 @@ define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
define half @bitcast_v2i8_f16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x i8> %a to half
@ -163,7 +163,7 @@ define half @bitcast_v2i8_f16(<2 x i8> %a) {
define half @bitcast_v1i16_f16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x i16> %a to half
@ -173,7 +173,7 @@ define half @bitcast_v1i16_f16(<1 x i16> %a) {
define float @bitcast_v4i8_f32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <4 x i8> %a to float
@ -183,7 +183,7 @@ define float @bitcast_v4i8_f32(<4 x i8> %a) {
define float @bitcast_v2i16_f32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x i16> %a to float
@ -193,7 +193,7 @@ define float @bitcast_v2i16_f32(<2 x i16> %a) {
define float @bitcast_v1i32_f32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x i32> %a to float
@ -212,7 +212,7 @@ define double @bitcast_v8i8_f64(<8 x i8> %a) {
;
; RV64-LABEL: bitcast_v8i8_f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <8 x i8> %a to double
@ -231,7 +231,7 @@ define double @bitcast_v4i16_f64(<4 x i16> %a) {
;
; RV64-LABEL: bitcast_v4i16_f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <4 x i16> %a to double
@ -250,7 +250,7 @@ define double @bitcast_v2i32_f64(<2 x i32> %a) {
;
; RV64-LABEL: bitcast_v2i32_f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <2 x i32> %a to double
@ -269,7 +269,7 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
;
; RV64-LABEL: bitcast_v1i64_f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
%b = bitcast <1 x i64> %a to double

View File

@ -5,7 +5,7 @@
define i16 @bitcast_v1f16_i16(<1 x half> %a) {
; CHECK-LABEL: bitcast_v1f16_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x half> %a to i16
@ -15,7 +15,7 @@ define i16 @bitcast_v1f16_i16(<1 x half> %a) {
define half @bitcast_v1f16_f16(<1 x half> %a) {
; CHECK-LABEL: bitcast_v1f16_f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: fmv.x.h a0, ft0
; CHECK-NEXT: ret
@ -26,7 +26,7 @@ define half @bitcast_v1f16_f16(<1 x half> %a) {
define i32 @bitcast_v2f16_i32(<2 x half> %a) {
; CHECK-LABEL: bitcast_v2f16_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x half> %a to i32
@ -36,7 +36,7 @@ define i32 @bitcast_v2f16_i32(<2 x half> %a) {
define i32 @bitcast_v1f32_i32(<1 x float> %a) {
; CHECK-LABEL: bitcast_v1f32_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x float> %a to i32
@ -46,13 +46,13 @@ define i32 @bitcast_v1f32_i32(<1 x float> %a) {
define float @bitcast_v2f16_f32(<2 x half> %a) {
; RV32-FP-LABEL: bitcast_v2f16_f32:
; RV32-FP: # %bb.0:
; RV32-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-FP-NEXT: vmv.x.s a0, v8
; RV32-FP-NEXT: ret
;
; RV64-FP-LABEL: bitcast_v2f16_f32:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64-FP-NEXT: vfmv.f.s ft0, v8
; RV64-FP-NEXT: fmv.x.w a0, ft0
; RV64-FP-NEXT: ret
@ -63,13 +63,13 @@ define float @bitcast_v2f16_f32(<2 x half> %a) {
define float @bitcast_v1f32_f32(<1 x float> %a) {
; RV32-FP-LABEL: bitcast_v1f32_f32:
; RV32-FP: # %bb.0:
; RV32-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-FP-NEXT: vmv.x.s a0, v8
; RV32-FP-NEXT: ret
;
; RV64-FP-LABEL: bitcast_v1f32_f32:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64-FP-NEXT: vfmv.f.s ft0, v8
; RV64-FP-NEXT: fmv.x.w a0, ft0
; RV64-FP-NEXT: ret
@ -89,7 +89,7 @@ define i64 @bitcast_v4f16_i64(<4 x half> %a) {
;
; RV64-FP-LABEL: bitcast_v4f16_i64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <4 x half> %a to i64
@ -108,7 +108,7 @@ define i64 @bitcast_v2f32_i64(<2 x float> %a) {
;
; RV64-FP-LABEL: bitcast_v2f32_i64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <2 x float> %a to i64
@ -127,7 +127,7 @@ define i64 @bitcast_v1f64_i64(<1 x double> %a) {
;
; RV64-FP-LABEL: bitcast_v1f64_i64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <1 x double> %a to i64
@ -139,7 +139,7 @@ define double @bitcast_v4f16_f64(<4 x half> %a) {
; RV32-FP: # %bb.0:
; RV32-FP-NEXT: addi sp, sp, -16
; RV32-FP-NEXT: .cfi_def_cfa_offset 16
; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV32-FP-NEXT: vfmv.f.s ft0, v8
; RV32-FP-NEXT: fsd ft0, 8(sp)
; RV32-FP-NEXT: lw a0, 8(sp)
@ -149,7 +149,7 @@ define double @bitcast_v4f16_f64(<4 x half> %a) {
;
; RV64-FP-LABEL: bitcast_v4f16_f64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <4 x half> %a to double
@ -161,7 +161,7 @@ define double @bitcast_v2f32_f64(<2 x float> %a) {
; RV32-FP: # %bb.0:
; RV32-FP-NEXT: addi sp, sp, -16
; RV32-FP-NEXT: .cfi_def_cfa_offset 16
; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV32-FP-NEXT: vfmv.f.s ft0, v8
; RV32-FP-NEXT: fsd ft0, 8(sp)
; RV32-FP-NEXT: lw a0, 8(sp)
@ -171,7 +171,7 @@ define double @bitcast_v2f32_f64(<2 x float> %a) {
;
; RV64-FP-LABEL: bitcast_v2f32_f64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <2 x float> %a to double
@ -183,7 +183,7 @@ define double @bitcast_v1f64_f64(<1 x double> %a) {
; RV32-FP: # %bb.0:
; RV32-FP-NEXT: addi sp, sp, -16
; RV32-FP-NEXT: .cfi_def_cfa_offset 16
; RV32-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV32-FP-NEXT: vfmv.f.s ft0, v8
; RV32-FP-NEXT: fsd ft0, 8(sp)
; RV32-FP-NEXT: lw a0, 8(sp)
@ -193,7 +193,7 @@ define double @bitcast_v1f64_f64(<1 x double> %a) {
;
; RV64-FP-LABEL: bitcast_v1f64_f64:
; RV64-FP: # %bb.0:
; RV64-FP-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-FP-NEXT: vmv.x.s a0, v8
; RV64-FP-NEXT: ret
%b = bitcast <1 x double> %a to double

View File

@ -37,7 +37,7 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX1-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; LMULMAX1-NEXT: vfmv.f.s ft0, v10
; LMULMAX1-NEXT: fsw ft0, 24(sp)
; LMULMAX1-NEXT: vfmv.f.s ft0, v8

View File

@ -321,7 +321,7 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
; CHECK-LABEL: interleave_shuffles:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vrgather.vi v25, v8, 1

View File

@ -90,7 +90,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
; RV32-NEXT: andi a1, a0, 1
; RV32-NEXT: beqz a1, .LBB4_2
; RV32-NEXT: # %bb.1: # %cond.load
; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: lb a2, 1(a1)
; RV32-NEXT: lbu a1, 0(a1)
@ -137,7 +137,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16>
; RV64-NEXT: andi a1, a0, 1
; RV64-NEXT: beqz a1, .LBB4_2
; RV64-NEXT: # %bb.1: # %cond.load
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: lb a2, 1(a1)
; RV64-NEXT: lbu a1, 0(a1)
@ -237,7 +237,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
; RV64-NEXT: andi a1, a0, 1
; RV64-NEXT: beqz a1, .LBB5_2
; RV64-NEXT: # %bb.1: # %cond.load
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: lwu a2, 4(a1)
; RV64-NEXT: lwu a1, 0(a1)
@ -302,7 +302,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV32-NEXT: .LBB6_5: # %cond.store
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; RV32-NEXT: vmv.x.s a2, v9
@ -379,7 +379,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
; RV64-NEXT: .LBB6_5: # %cond.store
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV64-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV64-NEXT: vmv.x.s a2, v10
@ -456,7 +456,7 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV32-NEXT: .LBB7_3: # %cond.store
; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: vmv.x.s a2, v9
; RV32-NEXT: sh a1, 0(a2)
@ -501,7 +501,7 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
; RV64-NEXT: .LBB7_3: # %cond.store
; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT: vmv.x.s a2, v9
@ -683,7 +683,7 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32>
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
; RV32-NEXT: .LBB9_3: # %cond.store
; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-NEXT: vmv.x.s a2, v8
; RV32-NEXT: sh a2, 0(a0)
; RV32-NEXT: srli a2, a2, 16
@ -726,7 +726,7 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32>
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
; RV64-NEXT: .LBB9_3: # %cond.store
; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64-NEXT: vmv.x.s a2, v8
; RV64-NEXT: sh a2, 0(a0)
; RV64-NEXT: srli a2, a2, 16

View File

@ -7,7 +7,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
define half @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -20,7 +20,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half>)
define half @intrinsic_vfmv.f.s_s_nxv2f16(<vscale x 2 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -33,7 +33,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half>)
define half @intrinsic_vfmv.f.s_s_nxv4f16(<vscale x 4 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -46,7 +46,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half>)
define half @intrinsic_vfmv.f.s_s_nxv8f16(<vscale x 8 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -59,7 +59,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half>)
define half @intrinsic_vfmv.f.s_s_nxv16f16(<vscale x 16 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -72,7 +72,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half>)
define half @intrinsic_vfmv.f.s_s_nxv32f16(<vscale x 32 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -85,7 +85,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
define float @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -98,7 +98,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)
define float @intrinsic_vfmv.f.s_s_nxv2f32(<vscale x 2 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -111,7 +111,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float>)
define float @intrinsic_vfmv.f.s_s_nxv4f32(<vscale x 4 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -124,7 +124,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float>)
define float @intrinsic_vfmv.f.s_s_nxv8f32(<vscale x 8 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -137,7 +137,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float>)
define float @intrinsic_vfmv.f.s_s_nxv16f32(<vscale x 16 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -150,7 +150,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
define double @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -163,7 +163,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double>)
define double @intrinsic_vfmv.f.s_s_nxv2f64(<vscale x 2 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -176,7 +176,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double>)
define double @intrinsic_vfmv.f.s_s_nxv4f64(<vscale x 4 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@ -189,7 +189,7 @@ declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
define double @intrinsic_vfmv.f.s_s_nxv8f64(<vscale x 8 x double> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:

View File

@ -6,7 +6,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -19,7 +19,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -32,7 +32,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -45,7 +45,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -58,7 +58,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -71,7 +71,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -84,7 +84,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -97,7 +97,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -110,7 +110,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -123,7 +123,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -136,7 +136,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -149,7 +149,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -162,7 +162,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -175,7 +175,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
define i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -188,7 +188,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
define i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -201,7 +201,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
define i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -214,7 +214,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
define i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -227,7 +227,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
define i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:


@ -6,7 +6,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -19,7 +19,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -32,7 +32,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -45,7 +45,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -58,7 +58,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -71,7 +71,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -84,7 +84,7 @@ declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)
define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -97,7 +97,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -110,7 +110,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -123,7 +123,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -136,7 +136,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -149,7 +149,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -162,7 +162,7 @@ declare i16 @llvm.riscv.vmv.x.s.nxv32i16( <vscale x 32 x i16>)
define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -175,7 +175,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv1i32( <vscale x 1 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -188,7 +188,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv2i32( <vscale x 2 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -201,7 +201,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv4i32( <vscale x 4 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -214,7 +214,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv8i32( <vscale x 8 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -227,7 +227,7 @@ declare i32 @llvm.riscv.vmv.x.s.nxv16i32( <vscale x 16 x i32>)
define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -240,7 +240,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv1i64( <vscale x 1 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -253,7 +253,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv2i64( <vscale x 2 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -266,7 +266,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv4i64( <vscale x 4 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:
@ -279,7 +279,7 @@ declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)
define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
entry:


@ -224,7 +224,7 @@ body: |
; CHECK-LABEL: name: vmv_x_s
; CHECK: liveins: $v8
; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
; CHECK: dead $x0 = PseudoVSETVLI killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype
; CHECK: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype
; CHECK: $x10 = COPY [[PseudoVMV_X_S_M1_]]
; CHECK: PseudoRET implicit $x10


@ -15,7 +15,7 @@ define <vscale x 1 x half> @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0,
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: fsh ft0, 14(sp) # 2-byte Folded Spill
; CHECK-NEXT: #APP
@ -36,7 +36,7 @@ define <vscale x 1 x float> @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: fsw ft0, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: #APP
@ -57,7 +57,7 @@ define <vscale x 1 x double> @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double>
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: fsd ft0, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: #APP


@ -727,7 +727,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV32MV-NEXT: vmsne.vv v0, v26, v30
; RV32MV-NEXT: vmv.v.i v26, 0
; RV32MV-NEXT: vmerge.vim v26, v26, -1, v0
; RV32MV-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; RV32MV-NEXT: vsetivli zero, 0, e32, m2, ta, mu
; RV32MV-NEXT: vmv.x.s a0, v26
; RV32MV-NEXT: sw a0, 0(s1)
; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, mu