1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-18 18:42:46 +02:00

[RISCV] Add support for fixed vector vselect

This patch adds support for fixed-length vector vselect. It does so by
lowering them to a custom unmasked VSELECT_VL node with a vector length
operand.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D96768
This commit is contained in:
Fraser Cormack 2021-02-15 16:38:25 +00:00
parent 3342619547
commit 73b0930caf
4 changed files with 354 additions and 1 deletion

View File

@ -558,6 +558,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SMAX, VT, Custom);
setOperationAction(ISD::UMIN, VT, Custom);
setOperationAction(ISD::UMAX, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
}
for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
@ -587,6 +589,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
for (auto CC : VFPCCToExpand)
setCondCodeAction(CC, VT, Expand);
setOperationAction(ISD::VSELECT, VT, Custom);
}
}
}
@ -1258,6 +1262,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
case ISD::UMAX:
return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
case ISD::VSELECT:
return lowerFixedLengthVectorSelectToRVV(Op, DAG);
}
}
@ -2247,6 +2253,31 @@ SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
}
SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  // Find the scalable container type for the fixed-length result, plus a
  // matching i1 container for the condition operand.
  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
  MVT MaskVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  // Promote all three operands (condition, true value, false value) into
  // scalable containers before building the VL node.
  SDValue Cond =
      convertToScalableVector(MaskVT, Op.getOperand(0), DAG, Subtarget);
  SDValue TrueV =
      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
  SDValue FalseV =
      convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);

  // Only the VL operand is used; VSELECT_VL is an unmasked operation, so the
  // default mask is discarded.
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Result =
      DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Cond, TrueV, FalseV, VL);
  // Extract the fixed-length result back out of the container type.
  return convertFromScalableVector(VT, Result, DAG, Subtarget);
}
SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
unsigned NewOpc,
bool HasMask) const {
@ -4906,6 +4937,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(UMIN_VL)
NODE_NAME_CASE(UMAX_VL)
NODE_NAME_CASE(SETCC_VL)
NODE_NAME_CASE(VSELECT_VL)
NODE_NAME_CASE(VMAND_VL)
NODE_NAME_CASE(VMOR_VL)
NODE_NAME_CASE(VMXOR_VL)

View File

@ -175,6 +175,9 @@ enum NodeType : unsigned {
// operand is VL.
SETCC_VL,
// Vector select with an additional VL operand. This operation is unmasked.
VSELECT_VL,
// Mask binary operators.
VMAND_VL,
VMOR_VL,
@ -410,6 +413,8 @@ private:
SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
unsigned MaskOpc,
unsigned VecOpc) const;
SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
SelectionDAG &DAG) const;
SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
bool HasMask = true) const;

View File

@ -111,6 +111,15 @@ def riscv_vrgather_vx_vl : SDNode<"RISCVISD::VRGATHER_VX_VL",
SDTCisSameNumEltsAs<0, 3>,
SDTCisVT<4, XLenVT>]>>;
// Unmasked vector select with an explicit vector-length operand.
// Operands: (mask, true-value, false-value, VL). The mask (operand 1) is an
// i1 vector with the same element count as the result; operands 2 and 3 have
// the result type; the trailing VL operand is XLenVT.
def riscv_vselect_vl : SDNode<"RISCVISD::VSELECT_VL",
SDTypeProfile<1, 4, [SDTCisVec<0>,
SDTCisVec<1>,
SDTCisSameNumEltsAs<0, 1>,
SDTCVecEltisVT<1, i1>,
SDTCisSameAs<0, 2>,
SDTCisSameAs<2, 3>,
SDTCisVT<4, XLenVT>]>>;
def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
SDTCisSameAs<0, 2>,
SDTCVecEltisVT<0, i1>,
@ -441,6 +450,31 @@ defm "" : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm "" : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm "" : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;
// 12.16. Vector Integer Merge Instructions
// Lower VSELECT_VL to vmerge for all integer vector types. Note the pseudo's
// operand order: the false operand ($rs2) comes first, then the true operand,
// then the mask in v0 (VMV0).
foreach vti = AllIntegerVectors in {
// Vector-vector select: vmerge.vvm.
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
vti.RegClass:$rs1,
vti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMERGE_VVM_"#vti.LMul.MX)
vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm,
GPR:$vl, vti.SEW)>;
// True operand is a splatted scalar register: vmerge.vxm.
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat XLenVT:$rs1),
vti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMERGE_VXM_"#vti.LMul.MX)
vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
// True operand is a splatted 5-bit immediate: vmerge.vim.
def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
(SplatPat_simm5 simm5:$rs1),
vti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMERGE_VIM_"#vti.LMul.MX)
vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, GPR:$vl, vti.SEW)>;
}
// 12.17. Vector Integer Move Instructions
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (riscv_vmv_v_x_vl GPR:$rs2, (XLenVT (VLOp GPR:$vl)))),
@ -609,8 +643,35 @@ foreach vti = AllFloatVectors in {
vti.RegClass:$rs, vti.RegClass:$rs, GPR:$vl, vti.SEW)>;
}
// 14.16. Vector Floating-Point Move Instruction
foreach fvti = AllFloatVectors in {
// Floating-point vselects:
// 12.16. Vector Integer Merge Instructions
// 14.13. Vector Floating-Point Merge Instruction
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
fvti.RegClass:$rs1,
fvti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, fvti.RegClass:$rs1, VMV0:$vm,
GPR:$vl, fvti.SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp fvti.ScalarRegClass:$rs1),
fvti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
fvti.RegClass:$rs2,
(fvti.Scalar fvti.ScalarRegClass:$rs1),
VMV0:$vm, GPR:$vl, fvti.SEW)>;
def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask VMV0:$vm),
(SplatFPOp (fvti.Scalar fpimm0)),
fvti.RegClass:$rs2,
(XLenVT (VLOp GPR:$vl)))),
(!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
fvti.RegClass:$rs2, 0, VMV0:$vm, GPR:$vl, fvti.SEW)>;
// 14.16. Vector Floating-Point Move Instruction
// If we're splatting fpimm0, use vmv.v.x vd, x0.
def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
(fvti.Scalar (fpimm0)), (XLenVT (VLOp GPR:$vl)))),

View File

@ -0,0 +1,255 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK
; Select between two loaded <8 x i32> vectors with a loaded mask; lowers to
; vmerge.vvm.
define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vv_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 8
; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
%va = load <8 x i32>, <8 x i32>* %a
%vb = load <8 x i32>, <8 x i32>* %b
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
store <8 x i32> %vsel, <8 x i32>* %z
ret void
}
; True operand is an i32 scalar splat; lowers to vmerge.vxm.
define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vx_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 8
; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a1, a4, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
%vb = load <8 x i32>, <8 x i32>* %b
%ahead = insertelement <8 x i32> undef, i32 %a, i32 0
%va = shufflevector <8 x i32> %ahead, <8 x i32> undef, <8 x i32> zeroinitializer
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
store <8 x i32> %vsel, <8 x i32>* %z
ret void
}
; True operand is an immediate splat (-1); lowers to vmerge.vim.
define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vi_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 8
; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, -1, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
%vb = load <8 x i32>, <8 x i32>* %b
%a = insertelement <8 x i32> undef, i32 -1, i32 0
%va = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> zeroinitializer
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x i32> %va, <8 x i32> %vb
store <8 x i32> %vsel, <8 x i32>* %z
ret void
}
; Select between two loaded <8 x float> vectors; lowers to vmerge.vvm.
define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 8
; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
%va = load <8 x float>, <8 x float>* %a
%vb = load <8 x float>, <8 x float>* %b
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
store <8 x float> %vsel, <8 x float>* %z
ret void
}
; True operand is a float scalar splat; lowers to vfmerge.vfm.
define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vx_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 8
; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
; CHECK-NEXT: vfmerge.vfm v26, v26, fa0, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
%vb = load <8 x float>, <8 x float>* %b
%ahead = insertelement <8 x float> undef, float %a, i32 0
%va = shufflevector <8 x float> %ahead, <8 x float> undef, <8 x i32> zeroinitializer
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
store <8 x float> %vsel, <8 x float>* %z
ret void
}
; Splat of +0.0 uses the integer immediate form vmerge.vim with 0.
define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vfpzero_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 8
; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, 0, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
%vb = load <8 x float>, <8 x float>* %b
%a = insertelement <8 x float> undef, float 0.0, i32 0
%va = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
%vcc = load <8 x i1>, <8 x i1>* %cc
%vsel = select <8 x i1> %vcc, <8 x float> %va, <8 x float> %vb
store <8 x float> %vsel, <8 x float>* %z
ret void
}
; Select between two loaded <16 x i16> vectors; lowers to vmerge.vvm.
define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vv_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 16
; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vle16.v v28, (a1)
; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a0, a4, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse16.v v26, (a3)
; CHECK-NEXT: ret
%va = load <16 x i16>, <16 x i16>* %a
%vb = load <16 x i16>, <16 x i16>* %b
%vcc = load <16 x i1>, <16 x i1>* %cc
%vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
store <16 x i16> %vsel, <16 x i16>* %z
ret void
}
; True operand is an i16 scalar splat; lowers to vmerge.vxm.
define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vx_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 16
; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a1, a4, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
; CHECK-NEXT: vse16.v v26, (a3)
; CHECK-NEXT: ret
%vb = load <16 x i16>, <16 x i16>* %b
%ahead = insertelement <16 x i16> undef, i16 %a, i32 0
%va = shufflevector <16 x i16> %ahead, <16 x i16> undef, <16 x i32> zeroinitializer
%vcc = load <16 x i1>, <16 x i1>* %cc
%vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
store <16 x i16> %vsel, <16 x i16>* %z
ret void
}
; True operand is an immediate splat (4); lowers to vmerge.vim.
define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vi_v16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 16
; CHECK-NEXT: vsetvli a4, a3, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, 4, v0
; CHECK-NEXT: vse16.v v26, (a2)
; CHECK-NEXT: ret
%vb = load <16 x i16>, <16 x i16>* %b
%a = insertelement <16 x i16> undef, i16 4, i32 0
%va = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> zeroinitializer
%vcc = load <16 x i1>, <16 x i1>* %cc
%vsel = select <16 x i1> %vcc, <16 x i16> %va, <16 x i16> %vb
store <16 x i16> %vsel, <16 x i16>* %z
ret void
}
; Select between two loaded <32 x half> vectors (m4 register group); lowers to
; vmerge.vvm.
define void @vselect_vv_v32f16(<32 x half>* %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
; CHECK-LABEL: vselect_vv_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a4, zero, 32
; CHECK-NEXT: vsetvli a5, a4, e16,m4,ta,mu
; CHECK-NEXT: vle16.v v28, (a0)
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vsetvli a0, a4, e8,m2,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
; CHECK-NEXT: vsetvli a0, a4, e16,m4,ta,mu
; CHECK-NEXT: vmerge.vvm v28, v8, v28, v0
; CHECK-NEXT: vse16.v v28, (a3)
; CHECK-NEXT: ret
%va = load <32 x half>, <32 x half>* %a
%vb = load <32 x half>, <32 x half>* %b
%vcc = load <32 x i1>, <32 x i1>* %cc
%vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
store <32 x half> %vsel, <32 x half>* %z
ret void
}
; True operand is a half scalar splat; lowers to vfmerge.vfm.
define void @vselect_vx_v32f16(half %a, <32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
; CHECK-LABEL: vselect_vx_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 32
; CHECK-NEXT: vsetvli a4, a3, e16,m4,ta,mu
; CHECK-NEXT: vle16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m2,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e16,m4,ta,mu
; CHECK-NEXT: vfmerge.vfm v28, v28, fa0, v0
; CHECK-NEXT: vse16.v v28, (a2)
; CHECK-NEXT: ret
%vb = load <32 x half>, <32 x half>* %b
%ahead = insertelement <32 x half> undef, half %a, i32 0
%va = shufflevector <32 x half> %ahead, <32 x half> undef, <32 x i32> zeroinitializer
%vcc = load <32 x i1>, <32 x i1>* %cc
%vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
store <32 x half> %vsel, <32 x half>* %z
ret void
}
; Splat of +0.0 uses the integer immediate form vmerge.vim with 0.
define void @vselect_vfpzero_v32f16(<32 x half>* %b, <32 x i1>* %cc, <32 x half>* %z) {
; CHECK-LABEL: vselect_vfpzero_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, zero, 32
; CHECK-NEXT: vsetvli a4, a3, e16,m4,ta,mu
; CHECK-NEXT: vle16.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a3, e8,m2,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
; CHECK-NEXT: vsetvli a0, a3, e16,m4,ta,mu
; CHECK-NEXT: vmerge.vim v28, v28, 0, v0
; CHECK-NEXT: vse16.v v28, (a2)
; CHECK-NEXT: ret
%vb = load <32 x half>, <32 x half>* %b
%a = insertelement <32 x half> undef, half 0.0, i32 0
%va = shufflevector <32 x half> %a, <32 x half> undef, <32 x i32> zeroinitializer
%vcc = load <32 x i1>, <32 x i1>* %cc
%vsel = select <32 x i1> %vcc, <32 x half> %va, <32 x half> %vb
store <32 x half> %vsel, <32 x half>* %z
ret void
}