; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1

; Each test loads a fixed-length vector, splats one of its elements via an
; extractelement + insertelement + zero-mask shufflevector sequence, and
; stores the result. This is expected to lower to vrgather.vi (or vrgather.vx
; when the element index does not fit the 5-bit immediate) of the loaded
; vector, rather than an element-wise sequence.

define void @gather_const_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: gather_const_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vrgather.vi v26, v25, 12
; CHECK-NEXT:    vse8.v v26, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = extractelement <16 x i8> %a, i32 12
  %c = insertelement <16 x i8> undef, i8 %b, i32 0
  %d = shufflevector <16 x i8> %c, <16 x i8> undef, <16 x i32> zeroinitializer
  store <16 x i8> %d, <16 x i8>* %x
  ret void
}

define void @gather_const_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: gather_const_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vrgather.vi v26, v25, 5
; CHECK-NEXT:    vse16.v v26, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = extractelement <8 x i16> %a, i32 5
  %c = insertelement <8 x i16> undef, i16 %b, i32 0
  %d = shufflevector <8 x i16> %c, <8 x i16> undef, <8 x i32> zeroinitializer
  store <8 x i16> %d, <8 x i16>* %x
  ret void
}

define void @gather_const_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: gather_const_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vrgather.vi v26, v25, 3
; CHECK-NEXT:    vse32.v v26, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = extractelement <4 x i32> %a, i32 3
  %c = insertelement <4 x i32> undef, i32 %b, i32 0
  %d = shufflevector <4 x i32> %c, <4 x i32> undef, <4 x i32> zeroinitializer
  store <4 x i32> %d, <4 x i32>* %x
  ret void
}

define void @gather_const_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: gather_const_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT:    vle64.v v25, (a0)
; CHECK-NEXT:    vrgather.vi v26, v25, 1
; CHECK-NEXT:    vse64.v v26, (a0)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, <2 x i64>* %x
  %b = extractelement <2 x i64> %a, i32 1
  %c = insertelement <2 x i64> undef, i64 %b, i32 0
  %d = shufflevector <2 x i64> %c, <2 x i64> undef, <2 x i32> zeroinitializer
  store <2 x i64> %d, <2 x i64>* %x
  ret void
}

; Index 32 exceeds the 5-bit immediate range of vrgather.vi, so the LMULMAX4
; lowering materializes the index in a register and uses vrgather.vx. Under
; LMULMAX1 the vector is split into four m1 parts; element 32 is element 0 of
; the part at offset 32, so one vrgather.vi result is stored to all four parts.
define void @gather_const_v64i8(<64 x i8>* %x) {
; LMULMAX4-LABEL: gather_const_v64i8:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi a1, zero, 64
; LMULMAX4-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
; LMULMAX4-NEXT:    vle8.v v28, (a0)
; LMULMAX4-NEXT:    addi a1, zero, 32
; LMULMAX4-NEXT:    vrgather.vx v8, v28, a1
; LMULMAX4-NEXT:    vse8.v v8, (a0)
; LMULMAX4-NEXT:    ret
;
; LMULMAX1-LABEL: gather_const_v64i8:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    addi a1, a0, 32
; LMULMAX1-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-NEXT:    vle8.v v25, (a1)
; LMULMAX1-NEXT:    addi a2, a0, 16
; LMULMAX1-NEXT:    addi a3, a0, 48
; LMULMAX1-NEXT:    vrgather.vi v26, v25, 0
; LMULMAX1-NEXT:    vse8.v v26, (a1)
; LMULMAX1-NEXT:    vse8.v v26, (a3)
; LMULMAX1-NEXT:    vse8.v v26, (a0)
; LMULMAX1-NEXT:    vse8.v v26, (a2)
; LMULMAX1-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = extractelement <64 x i8> %a, i32 32
  %c = insertelement <64 x i8> undef, i8 %b, i32 0
  %d = shufflevector <64 x i8> %c, <64 x i8> undef, <64 x i32> zeroinitializer
  store <64 x i8> %d, <64 x i8>* %x
  ret void
}

define void @gather_const_v16i16(<32 x i16>* %x) {
; LMULMAX4-LABEL: gather_const_v16i16:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi a1, zero, 32
; LMULMAX4-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
; LMULMAX4-NEXT:    vle16.v v28, (a0)
; LMULMAX4-NEXT:    vrgather.vi v8, v28, 25
; LMULMAX4-NEXT:    vse16.v v8, (a0)
; LMULMAX4-NEXT:    ret
;
; LMULMAX1-LABEL: gather_const_v16i16:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    addi a1, a0, 48
; LMULMAX1-NEXT:    vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-NEXT:    vle16.v v25, (a1)
; LMULMAX1-NEXT:    addi a2, a0, 16
; LMULMAX1-NEXT:    addi a3, a0, 32
; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
; LMULMAX1-NEXT:    vse16.v v26, (a3)
; LMULMAX1-NEXT:    vse16.v v26, (a1)
; LMULMAX1-NEXT:    vse16.v v26, (a0)
; LMULMAX1-NEXT:    vse16.v v26, (a2)
; LMULMAX1-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = extractelement <32 x i16> %a, i32 25
  %c = insertelement <32 x i16> undef, i16 %b, i32 0
  %d = shufflevector <32 x i16> %c, <32 x i16> undef, <32 x i32> zeroinitializer
  store <32 x i16> %d, <32 x i16>* %x
  ret void
}

define void @gather_const_v16i32(<16 x i32>* %x) {
; LMULMAX4-LABEL: gather_const_v16i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
; LMULMAX4-NEXT:    vle32.v v28, (a0)
; LMULMAX4-NEXT:    vrgather.vi v8, v28, 9
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    ret
;
; LMULMAX1-LABEL: gather_const_v16i32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    addi a1, a0, 32
; LMULMAX1-NEXT:    vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT:    vle32.v v25, (a1)
; LMULMAX1-NEXT:    addi a2, a0, 16
; LMULMAX1-NEXT:    addi a3, a0, 48
; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
; LMULMAX1-NEXT:    vse32.v v26, (a1)
; LMULMAX1-NEXT:    vse32.v v26, (a3)
; LMULMAX1-NEXT:    vse32.v v26, (a0)
; LMULMAX1-NEXT:    vse32.v v26, (a2)
; LMULMAX1-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = extractelement <16 x i32> %a, i32 9
  %c = insertelement <16 x i32> undef, i32 %b, i32 0
  %d = shufflevector <16 x i32> %c, <16 x i32> undef, <16 x i32> zeroinitializer
  store <16 x i32> %d, <16 x i32>* %x
  ret void
}

define void @gather_const_v8i64(<8 x i64>* %x) {
; LMULMAX4-LABEL: gather_const_v8i64:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
; LMULMAX4-NEXT:    vle64.v v28, (a0)
; LMULMAX4-NEXT:    vrgather.vi v8, v28, 3
; LMULMAX4-NEXT:    vse64.v v8, (a0)
; LMULMAX4-NEXT:    ret
;
; LMULMAX1-LABEL: gather_const_v8i64:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    addi a1, a0, 16
; LMULMAX1-NEXT:    vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-NEXT:    vle64.v v25, (a1)
; LMULMAX1-NEXT:    addi a2, a0, 48
; LMULMAX1-NEXT:    addi a3, a0, 32
; LMULMAX1-NEXT:    vrgather.vi v26, v25, 1
; LMULMAX1-NEXT:    vse64.v v26, (a3)
; LMULMAX1-NEXT:    vse64.v v26, (a2)
; LMULMAX1-NEXT:    vse64.v v26, (a0)
; LMULMAX1-NEXT:    vse64.v v26, (a1)
; LMULMAX1-NEXT:    ret
  %a = load <8 x i64>, <8 x i64>* %x
  %b = extractelement <8 x i64> %a, i32 3
  %c = insertelement <8 x i64> undef, i64 %b, i32 0
  %d = shufflevector <8 x i64> %c, <8 x i64> undef, <8 x i32> zeroinitializer
  store <8 x i64> %d, <8 x i64>* %x
  ret void
}