
[SLPVectorizer] Add initial alternate opcode support for cast instructions.

We currently only support binary instructions in the alternate opcode shuffles.

This patch is an initial attempt at adding cast instructions as well. It raises several issues that we probably want to address as we continue to generalize the alternate mechanism:

1 - Duplication of cost determination - we should probably add scalar/vector cost helper functions and get BoUpSLP::getEntryCost to use them instead of determining costs directly (a rough sketch follows after this list).
2 - Support alternate instructions with the same opcode (e.g. casts with different src types) - alternate vectorization of calls with different IntrinsicIDs will require this.
3 - Allow alternates to be a different instruction type - mixing binary/cast/call etc.
4 - Allow passthrough of unsupported alternate instructions - related to PR30787/D28907 'copyable' elements.
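
As a rough sketch of the issue 1 helpers (the names below are hypothetical, not part of this patch), the idea is to funnel the per-bundle scalar cost query through one place, reusing the TTI->getInstructionCost(I, TCK_RecipThroughput) call that getEntryCost now makes inline:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Hypothetical helper (illustration only): total scalar cost of a bundle,
// using the reciprocal-throughput cost kind as getEntryCost does below.
static int getScalarBundleCost(const TargetTransformInfo &TTI,
                               ArrayRef<Value *> VL) {
  int Cost = 0;
  for (Value *V : VL)
    Cost += TTI.getInstructionCost(cast<Instruction>(V),
                                   TargetTransformInfo::TCK_RecipThroughput);
  return Cost;
}

A matching vector-side helper would dispatch on the instruction class (arithmetic vs. cast vs. call), which is exactly the duplication the ShuffleVector case below now grows.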

Differential Revision: https://reviews.llvm.org/D49135

llvm-svn: 336804
Simon Pilgrim, 2018-07-11 13:34:09 +00:00
parent 594edf22ad
commit 17f835882b
2 changed files with 209 additions and 74 deletions


@@ -353,16 +353,22 @@ static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
   if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
     return InstructionsState(VL[BaseIndex], nullptr, nullptr);
 
+  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
   bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
   unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
   unsigned AltOpcode = Opcode;
   unsigned AltIndex = BaseIndex;
 
   // Check for one alternate opcode from another BinaryOperator.
-  // TODO - can we support other operators (casts etc.)?
+  // TODO - generalize to support all operators (calls etc.).
   for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
     unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
     if (InstOpcode != Opcode && InstOpcode != AltOpcode) {
+      if (Opcode == AltOpcode && IsCastOp && isa<CastInst>(VL[Cnt])) {
+        AltOpcode = InstOpcode;
+        AltIndex = Cnt;
+        continue;
+      }
       if (Opcode == AltOpcode && IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
         AltOpcode = InstOpcode;
         AltIndex = Cnt;
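
To make the new rule concrete: a bundle such as {sitofp, sitofp, uitofp, uitofp} now classifies with main opcode SIToFP and alternate opcode UIToFP, whereas a mixed bundle like {sitofp, add, ...} is still rejected. The following standalone toy (plain C++, not LLVM code; the opcode numbers are made up) mirrors the scan above: at most two distinct opcodes per bundle, and the alternate must be the same kind of instruction as the main one.

#include <cstdio>
#include <utility>
#include <vector>

enum class Kind { BinOp, Cast, Other };
struct Inst { unsigned Opcode; Kind K; }; // stand-in for llvm::Instruction

// Returns {MainOpcode, AltOpcode}; equal values mean no alternate shuffle,
// {0, 0} means rejection (mirrors InstructionsState(..., nullptr, nullptr)).
static std::pair<unsigned, unsigned> classify(const std::vector<Inst> &VL) {
  unsigned Opcode = VL[0].Opcode, AltOpcode = Opcode;
  Kind BaseKind = VL[0].K;
  for (const Inst &I : VL) {
    if (I.Opcode == Opcode || I.Opcode == AltOpcode)
      continue;
    // First divergence: adopt an alternate opcode, but only once and only
    // for the same instruction kind (binop/binop or, with this patch,
    // cast/cast).
    if (AltOpcode == Opcode && I.K == BaseKind && BaseKind != Kind::Other) {
      AltOpcode = I.Opcode;
      continue;
    }
    return {0, 0}; // a third opcode, or mixed kinds
  }
  return {Opcode, AltOpcode};
}

int main() {
  std::vector<Inst> VL = {{40, Kind::Cast}, {40, Kind::Cast},
                          {41, Kind::Cast}, {41, Kind::Cast}};
  std::pair<unsigned, unsigned> R = classify(VL);
  std::printf("main=%u alt=%u\n", R.first, R.second); // main=40 alt=41
}

(The real code also records AltIndex, the position of the first alternate instruction, which later identifies the alternate operation's prototype.)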
@@ -2363,32 +2369,45 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
       return ReuseShuffleCost + VecCallCost - ScalarCallCost;
     }
     case Instruction::ShuffleVector: {
-      assert(S.isAltShuffle() && Instruction::isBinaryOp(S.getOpcode()) &&
-             Instruction::isBinaryOp(S.getAltOpcode()) &&
+      assert(S.isAltShuffle() &&
+             ((Instruction::isBinaryOp(S.getOpcode()) &&
+               Instruction::isBinaryOp(S.getAltOpcode())) ||
+              (Instruction::isCast(S.getOpcode()) &&
+               Instruction::isCast(S.getAltOpcode()))) &&
              "Invalid Shuffle Vector Operand");
       int ScalarCost = 0;
       if (NeedToShuffleReuses) {
         for (unsigned Idx : E->ReuseShuffleIndices) {
           Instruction *I = cast<Instruction>(VL[Idx]);
-          ReuseShuffleCost -=
-              TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy);
+          ReuseShuffleCost -= TTI->getInstructionCost(
+              I, TargetTransformInfo::TCK_RecipThroughput);
         }
         for (Value *V : VL) {
           Instruction *I = cast<Instruction>(V);
-          ReuseShuffleCost +=
-              TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy);
+          ReuseShuffleCost += TTI->getInstructionCost(
+              I, TargetTransformInfo::TCK_RecipThroughput);
         }
       }
       int VecCost = 0;
       for (Value *i : VL) {
         Instruction *I = cast<Instruction>(i);
         assert(S.isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
-        ScalarCost += TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy);
+        ScalarCost += TTI->getInstructionCost(
+            I, TargetTransformInfo::TCK_RecipThroughput);
       }
       // VecCost is equal to sum of the cost of creating 2 vectors
       // and the cost of creating shuffle.
-      VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy);
-      VecCost += TTI->getArithmeticInstrCost(S.getAltOpcode(), VecTy);
+      if (Instruction::isBinaryOp(S.getOpcode())) {
+        VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy);
+        VecCost += TTI->getArithmeticInstrCost(S.getAltOpcode(), VecTy);
+      } else {
+        Type *Src0SclTy = S.MainOp->getOperand(0)->getType();
+        Type *Src1SclTy = S.AltOp->getOperand(0)->getType();
+        VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size());
+        VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size());
+        VecCost = TTI->getCastInstrCost(S.getOpcode(), VecTy, Src0Ty);
+        VecCost += TTI->getCastInstrCost(S.getAltOpcode(), VecTy, Src1Ty);
+      }
       VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0);
       return ReuseShuffleCost + VecCost - ScalarCost;
     }
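
As a worked example of the comparison this case now performs, with invented unit costs (real numbers come from the target's TTI): for the eight sitofp/uitofp scalars in the test below, the scalar side pays one cast per lane, while the vector side pays two whole-vector casts plus one select shuffle.

#include <cstdio>

int main() {
  // Toy model: every scalar cast costs 1, each <8 x i32> -> <8 x float>
  // vector cast costs 1, and the SK_Select shuffle costs 1. These are
  // illustrative values only; TTI supplies target-specific costs.
  int ScalarCost = 8 * 1;  // 4 x sitofp + 4 x uitofp scalar instructions
  int VecCost = 1 + 1 + 1; // vector sitofp, vector uitofp, select shuffle
  std::printf("delta = %d\n", VecCost - ScalarCost); // delta = -5
}

A negative delta means getEntryCost reports the vector form as cheaper. With a target whose vector casts are expensive, the delta can flip positive, which is presumably why the SSE and SLM runs in the updated test below stay scalar while AVX vectorizes.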
@@ -3470,30 +3489,47 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
     }
     case Instruction::ShuffleVector: {
       ValueList LHSVL, RHSVL;
-      assert(S.isAltShuffle() && Instruction::isBinaryOp(S.getOpcode()) &&
-             Instruction::isBinaryOp(S.getAltOpcode()) &&
+      assert(S.isAltShuffle() &&
+             ((Instruction::isBinaryOp(S.getOpcode()) &&
+               Instruction::isBinaryOp(S.getAltOpcode())) ||
+              (Instruction::isCast(S.getOpcode()) &&
+               Instruction::isCast(S.getAltOpcode()))) &&
              "Invalid Shuffle Vector Operand");
 
-      reorderAltShuffleOperands(S, E->Scalars, LHSVL, RHSVL);
-      setInsertPointAfterBundle(E->Scalars, S);
-      Value *LHS = vectorizeTree(LHSVL);
-      Value *RHS = vectorizeTree(RHSVL);
+      Value *LHS, *RHS;
+      if (Instruction::isBinaryOp(S.getOpcode())) {
+        reorderAltShuffleOperands(S, E->Scalars, LHSVL, RHSVL);
+        setInsertPointAfterBundle(E->Scalars, S);
+        LHS = vectorizeTree(LHSVL);
+        RHS = vectorizeTree(RHSVL);
+      } else {
+        ValueList INVL;
+        for (Value *V : E->Scalars)
+          INVL.push_back(cast<Instruction>(V)->getOperand(0));
+        setInsertPointAfterBundle(E->Scalars, S);
+        LHS = vectorizeTree(INVL);
+      }
 
       if (E->VectorizedValue) {
         LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
         return E->VectorizedValue;
       }
 
-      // Create a vector of LHS op1 RHS
-      Value *V0 = Builder.CreateBinOp(
-          static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS);
-
-      // Create a vector of LHS op2 RHS
-      Value *V1 = Builder.CreateBinOp(
-          static_cast<Instruction::BinaryOps>(S.getAltOpcode()), LHS, RHS);
+      Value *V0, *V1;
+      if (Instruction::isBinaryOp(S.getOpcode())) {
+        V0 = Builder.CreateBinOp(
+            static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS);
+        V1 = Builder.CreateBinOp(
+            static_cast<Instruction::BinaryOps>(S.getAltOpcode()), LHS, RHS);
+      } else {
+        V0 = Builder.CreateCast(
+            static_cast<Instruction::CastOps>(S.getOpcode()), LHS, VecTy);
+        V1 = Builder.CreateCast(
+            static_cast<Instruction::CastOps>(S.getAltOpcode()), LHS, VecTy);
+      }
 
       // Create shuffle to take alternate operations from the vector.
-      // Also, gather up odd and even scalar ops to propagate IR flags to
+      // Also, gather up main and alt scalar ops to propagate IR flags to
       // each vector operation.
       ValueList OpScalars, AltScalars;
       unsigned e = E->Scalars.size();
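
Shape-wise, the cast branch above reduces to the following sketch, assuming an existing IRBuilder and an already-vectorized source operand. LaneIsAlt is a hypothetical stand-in for the per-scalar main/alternate test (compare S.isOpcodeOrAlt) that the surrounding code performs while filling the shuffle mask; the mask it builds is exactly the <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15> pattern visible in the AVX/AVX512 check lines below.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Both casts consume the same vectorized source; the shuffle then keeps
// lane i from V0 (main opcode) or V1 (alternate opcode, offset by NumElts).
static Value *emitAltCast(IRBuilder<> &Builder, Value *Src, Type *VecTy,
                          Instruction::CastOps MainOp,
                          Instruction::CastOps AltOp,
                          ArrayRef<bool> LaneIsAlt) {
  Value *V0 = Builder.CreateCast(MainOp, Src, VecTy); // e.g. sitofp
  Value *V1 = Builder.CreateCast(AltOp, Src, VecTy);  // e.g. uitofp
  unsigned NumElts = LaneIsAlt.size();
  SmallVector<uint32_t, 8> Mask;
  for (unsigned I = 0; I != NumElts; ++I)
    Mask.push_back(LaneIsAlt[I] ? NumElts + I : I);
  return Builder.CreateShuffleVector(V0, V1, Mask);
}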


@@ -7,32 +7,71 @@
 ; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skx -basicaa -slp-vectorizer -instcombine -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
 
 define <8 x float> @sitofp_uitofp(<8 x i32> %a) {
-; CHECK-LABEL: @sitofp_uitofp(
-; CHECK-NEXT: [[A0:%.*]] = extractelement <8 x i32> [[A:%.*]], i32 0
-; CHECK-NEXT: [[A1:%.*]] = extractelement <8 x i32> [[A]], i32 1
-; CHECK-NEXT: [[A2:%.*]] = extractelement <8 x i32> [[A]], i32 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <8 x i32> [[A]], i32 3
-; CHECK-NEXT: [[A4:%.*]] = extractelement <8 x i32> [[A]], i32 4
-; CHECK-NEXT: [[A5:%.*]] = extractelement <8 x i32> [[A]], i32 5
-; CHECK-NEXT: [[A6:%.*]] = extractelement <8 x i32> [[A]], i32 6
-; CHECK-NEXT: [[A7:%.*]] = extractelement <8 x i32> [[A]], i32 7
-; CHECK-NEXT: [[AB0:%.*]] = sitofp i32 [[A0]] to float
-; CHECK-NEXT: [[AB1:%.*]] = sitofp i32 [[A1]] to float
-; CHECK-NEXT: [[AB2:%.*]] = sitofp i32 [[A2]] to float
-; CHECK-NEXT: [[AB3:%.*]] = sitofp i32 [[A3]] to float
-; CHECK-NEXT: [[AB4:%.*]] = uitofp i32 [[A4]] to float
-; CHECK-NEXT: [[AB5:%.*]] = uitofp i32 [[A5]] to float
-; CHECK-NEXT: [[AB6:%.*]] = uitofp i32 [[A6]] to float
-; CHECK-NEXT: [[AB7:%.*]] = uitofp i32 [[A7]] to float
-; CHECK-NEXT: [[R0:%.*]] = insertelement <8 x float> undef, float [[AB0]], i32 0
-; CHECK-NEXT: [[R1:%.*]] = insertelement <8 x float> [[R0]], float [[AB1]], i32 1
-; CHECK-NEXT: [[R2:%.*]] = insertelement <8 x float> [[R1]], float [[AB2]], i32 2
-; CHECK-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R2]], float [[AB3]], i32 3
-; CHECK-NEXT: [[R4:%.*]] = insertelement <8 x float> [[R3]], float [[AB4]], i32 4
-; CHECK-NEXT: [[R5:%.*]] = insertelement <8 x float> [[R4]], float [[AB5]], i32 5
-; CHECK-NEXT: [[R6:%.*]] = insertelement <8 x float> [[R5]], float [[AB6]], i32 6
-; CHECK-NEXT: [[R7:%.*]] = insertelement <8 x float> [[R6]], float [[AB7]], i32 7
-; CHECK-NEXT: ret <8 x float> [[R7]]
+; SSE-LABEL: @sitofp_uitofp(
+; SSE-NEXT: [[A0:%.*]] = extractelement <8 x i32> [[A:%.*]], i32 0
+; SSE-NEXT: [[A1:%.*]] = extractelement <8 x i32> [[A]], i32 1
+; SSE-NEXT: [[A2:%.*]] = extractelement <8 x i32> [[A]], i32 2
+; SSE-NEXT: [[A3:%.*]] = extractelement <8 x i32> [[A]], i32 3
+; SSE-NEXT: [[A4:%.*]] = extractelement <8 x i32> [[A]], i32 4
+; SSE-NEXT: [[A5:%.*]] = extractelement <8 x i32> [[A]], i32 5
+; SSE-NEXT: [[A6:%.*]] = extractelement <8 x i32> [[A]], i32 6
+; SSE-NEXT: [[A7:%.*]] = extractelement <8 x i32> [[A]], i32 7
+; SSE-NEXT: [[AB0:%.*]] = sitofp i32 [[A0]] to float
+; SSE-NEXT: [[AB1:%.*]] = sitofp i32 [[A1]] to float
+; SSE-NEXT: [[AB2:%.*]] = sitofp i32 [[A2]] to float
+; SSE-NEXT: [[AB3:%.*]] = sitofp i32 [[A3]] to float
+; SSE-NEXT: [[AB4:%.*]] = uitofp i32 [[A4]] to float
+; SSE-NEXT: [[AB5:%.*]] = uitofp i32 [[A5]] to float
+; SSE-NEXT: [[AB6:%.*]] = uitofp i32 [[A6]] to float
+; SSE-NEXT: [[AB7:%.*]] = uitofp i32 [[A7]] to float
+; SSE-NEXT: [[R0:%.*]] = insertelement <8 x float> undef, float [[AB0]], i32 0
+; SSE-NEXT: [[R1:%.*]] = insertelement <8 x float> [[R0]], float [[AB1]], i32 1
+; SSE-NEXT: [[R2:%.*]] = insertelement <8 x float> [[R1]], float [[AB2]], i32 2
+; SSE-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R2]], float [[AB3]], i32 3
+; SSE-NEXT: [[R4:%.*]] = insertelement <8 x float> [[R3]], float [[AB4]], i32 4
+; SSE-NEXT: [[R5:%.*]] = insertelement <8 x float> [[R4]], float [[AB5]], i32 5
+; SSE-NEXT: [[R6:%.*]] = insertelement <8 x float> [[R5]], float [[AB6]], i32 6
+; SSE-NEXT: [[R7:%.*]] = insertelement <8 x float> [[R6]], float [[AB7]], i32 7
+; SSE-NEXT: ret <8 x float> [[R7]]
+;
+; SLM-LABEL: @sitofp_uitofp(
+; SLM-NEXT: [[A0:%.*]] = extractelement <8 x i32> [[A:%.*]], i32 0
+; SLM-NEXT: [[A1:%.*]] = extractelement <8 x i32> [[A]], i32 1
+; SLM-NEXT: [[A2:%.*]] = extractelement <8 x i32> [[A]], i32 2
+; SLM-NEXT: [[A3:%.*]] = extractelement <8 x i32> [[A]], i32 3
+; SLM-NEXT: [[A4:%.*]] = extractelement <8 x i32> [[A]], i32 4
+; SLM-NEXT: [[A5:%.*]] = extractelement <8 x i32> [[A]], i32 5
+; SLM-NEXT: [[A6:%.*]] = extractelement <8 x i32> [[A]], i32 6
+; SLM-NEXT: [[A7:%.*]] = extractelement <8 x i32> [[A]], i32 7
+; SLM-NEXT: [[AB0:%.*]] = sitofp i32 [[A0]] to float
+; SLM-NEXT: [[AB1:%.*]] = sitofp i32 [[A1]] to float
+; SLM-NEXT: [[AB2:%.*]] = sitofp i32 [[A2]] to float
+; SLM-NEXT: [[AB3:%.*]] = sitofp i32 [[A3]] to float
+; SLM-NEXT: [[AB4:%.*]] = uitofp i32 [[A4]] to float
+; SLM-NEXT: [[AB5:%.*]] = uitofp i32 [[A5]] to float
+; SLM-NEXT: [[AB6:%.*]] = uitofp i32 [[A6]] to float
+; SLM-NEXT: [[AB7:%.*]] = uitofp i32 [[A7]] to float
+; SLM-NEXT: [[R0:%.*]] = insertelement <8 x float> undef, float [[AB0]], i32 0
+; SLM-NEXT: [[R1:%.*]] = insertelement <8 x float> [[R0]], float [[AB1]], i32 1
+; SLM-NEXT: [[R2:%.*]] = insertelement <8 x float> [[R1]], float [[AB2]], i32 2
+; SLM-NEXT: [[R3:%.*]] = insertelement <8 x float> [[R2]], float [[AB3]], i32 3
+; SLM-NEXT: [[R4:%.*]] = insertelement <8 x float> [[R3]], float [[AB4]], i32 4
+; SLM-NEXT: [[R5:%.*]] = insertelement <8 x float> [[R4]], float [[AB5]], i32 5
+; SLM-NEXT: [[R6:%.*]] = insertelement <8 x float> [[R5]], float [[AB6]], i32 6
+; SLM-NEXT: [[R7:%.*]] = insertelement <8 x float> [[R6]], float [[AB7]], i32 7
+; SLM-NEXT: ret <8 x float> [[R7]]
+;
+; AVX-LABEL: @sitofp_uitofp(
+; AVX-NEXT: [[TMP1:%.*]] = sitofp <8 x i32> [[A:%.*]] to <8 x float>
+; AVX-NEXT: [[TMP2:%.*]] = uitofp <8 x i32> [[A]] to <8 x float>
+; AVX-NEXT: [[R7:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+; AVX-NEXT: ret <8 x float> [[R7]]
+;
+; AVX512-LABEL: @sitofp_uitofp(
+; AVX512-NEXT: [[TMP1:%.*]] = sitofp <8 x i32> [[A:%.*]] to <8 x float>
+; AVX512-NEXT: [[TMP2:%.*]] = uitofp <8 x i32> [[A]] to <8 x float>
+; AVX512-NEXT: [[R7:%.*]] = shufflevector <8 x float> [[TMP1]], <8 x float> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+; AVX512-NEXT: ret <8 x float> [[R7]]
 ;
   %a0 = extractelement <8 x i32> %a, i32 0
   %a1 = extractelement <8 x i32> %a, i32 1
@@ -62,32 +101,92 @@ define <8 x float> @sitofp_uitofp(<8 x i32> %a) {
 }
 
 define <8 x i32> @fptosi_fptoui(<8 x float> %a) {
-; CHECK-LABEL: @fptosi_fptoui(
-; CHECK-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i32 0
-; CHECK-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i32 1
-; CHECK-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i32 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i32 3
-; CHECK-NEXT: [[A4:%.*]] = extractelement <8 x float> [[A]], i32 4
-; CHECK-NEXT: [[A5:%.*]] = extractelement <8 x float> [[A]], i32 5
-; CHECK-NEXT: [[A6:%.*]] = extractelement <8 x float> [[A]], i32 6
-; CHECK-NEXT: [[A7:%.*]] = extractelement <8 x float> [[A]], i32 7
-; CHECK-NEXT: [[AB0:%.*]] = fptosi float [[A0]] to i32
-; CHECK-NEXT: [[AB1:%.*]] = fptosi float [[A1]] to i32
-; CHECK-NEXT: [[AB2:%.*]] = fptosi float [[A2]] to i32
-; CHECK-NEXT: [[AB3:%.*]] = fptosi float [[A3]] to i32
-; CHECK-NEXT: [[AB4:%.*]] = fptoui float [[A4]] to i32
-; CHECK-NEXT: [[AB5:%.*]] = fptoui float [[A5]] to i32
-; CHECK-NEXT: [[AB6:%.*]] = fptoui float [[A6]] to i32
-; CHECK-NEXT: [[AB7:%.*]] = fptoui float [[A7]] to i32
-; CHECK-NEXT: [[R0:%.*]] = insertelement <8 x i32> undef, i32 [[AB0]], i32 0
-; CHECK-NEXT: [[R1:%.*]] = insertelement <8 x i32> [[R0]], i32 [[AB1]], i32 1
-; CHECK-NEXT: [[R2:%.*]] = insertelement <8 x i32> [[R1]], i32 [[AB2]], i32 2
-; CHECK-NEXT: [[R3:%.*]] = insertelement <8 x i32> [[R2]], i32 [[AB3]], i32 3
-; CHECK-NEXT: [[R4:%.*]] = insertelement <8 x i32> [[R3]], i32 [[AB4]], i32 4
-; CHECK-NEXT: [[R5:%.*]] = insertelement <8 x i32> [[R4]], i32 [[AB5]], i32 5
-; CHECK-NEXT: [[R6:%.*]] = insertelement <8 x i32> [[R5]], i32 [[AB6]], i32 6
-; CHECK-NEXT: [[R7:%.*]] = insertelement <8 x i32> [[R6]], i32 [[AB7]], i32 7
-; CHECK-NEXT: ret <8 x i32> [[R7]]
+; SSE-LABEL: @fptosi_fptoui(
+; SSE-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i32 0
+; SSE-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i32 1
+; SSE-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i32 2
+; SSE-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i32 3
+; SSE-NEXT: [[A4:%.*]] = extractelement <8 x float> [[A]], i32 4
+; SSE-NEXT: [[A5:%.*]] = extractelement <8 x float> [[A]], i32 5
+; SSE-NEXT: [[A6:%.*]] = extractelement <8 x float> [[A]], i32 6
+; SSE-NEXT: [[A7:%.*]] = extractelement <8 x float> [[A]], i32 7
+; SSE-NEXT: [[AB0:%.*]] = fptosi float [[A0]] to i32
+; SSE-NEXT: [[AB1:%.*]] = fptosi float [[A1]] to i32
+; SSE-NEXT: [[AB2:%.*]] = fptosi float [[A2]] to i32
+; SSE-NEXT: [[AB3:%.*]] = fptosi float [[A3]] to i32
+; SSE-NEXT: [[AB4:%.*]] = fptoui float [[A4]] to i32
+; SSE-NEXT: [[AB5:%.*]] = fptoui float [[A5]] to i32
+; SSE-NEXT: [[AB6:%.*]] = fptoui float [[A6]] to i32
+; SSE-NEXT: [[AB7:%.*]] = fptoui float [[A7]] to i32
+; SSE-NEXT: [[R0:%.*]] = insertelement <8 x i32> undef, i32 [[AB0]], i32 0
+; SSE-NEXT: [[R1:%.*]] = insertelement <8 x i32> [[R0]], i32 [[AB1]], i32 1
+; SSE-NEXT: [[R2:%.*]] = insertelement <8 x i32> [[R1]], i32 [[AB2]], i32 2
+; SSE-NEXT: [[R3:%.*]] = insertelement <8 x i32> [[R2]], i32 [[AB3]], i32 3
+; SSE-NEXT: [[R4:%.*]] = insertelement <8 x i32> [[R3]], i32 [[AB4]], i32 4
+; SSE-NEXT: [[R5:%.*]] = insertelement <8 x i32> [[R4]], i32 [[AB5]], i32 5
+; SSE-NEXT: [[R6:%.*]] = insertelement <8 x i32> [[R5]], i32 [[AB6]], i32 6
+; SSE-NEXT: [[R7:%.*]] = insertelement <8 x i32> [[R6]], i32 [[AB7]], i32 7
+; SSE-NEXT: ret <8 x i32> [[R7]]
+;
+; SLM-LABEL: @fptosi_fptoui(
+; SLM-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i32 0
+; SLM-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i32 1
+; SLM-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i32 2
+; SLM-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i32 3
+; SLM-NEXT: [[A4:%.*]] = extractelement <8 x float> [[A]], i32 4
+; SLM-NEXT: [[A5:%.*]] = extractelement <8 x float> [[A]], i32 5
+; SLM-NEXT: [[A6:%.*]] = extractelement <8 x float> [[A]], i32 6
+; SLM-NEXT: [[A7:%.*]] = extractelement <8 x float> [[A]], i32 7
+; SLM-NEXT: [[AB0:%.*]] = fptosi float [[A0]] to i32
+; SLM-NEXT: [[AB1:%.*]] = fptosi float [[A1]] to i32
+; SLM-NEXT: [[AB2:%.*]] = fptosi float [[A2]] to i32
+; SLM-NEXT: [[AB3:%.*]] = fptosi float [[A3]] to i32
+; SLM-NEXT: [[AB4:%.*]] = fptoui float [[A4]] to i32
+; SLM-NEXT: [[AB5:%.*]] = fptoui float [[A5]] to i32
+; SLM-NEXT: [[AB6:%.*]] = fptoui float [[A6]] to i32
+; SLM-NEXT: [[AB7:%.*]] = fptoui float [[A7]] to i32
+; SLM-NEXT: [[R0:%.*]] = insertelement <8 x i32> undef, i32 [[AB0]], i32 0
+; SLM-NEXT: [[R1:%.*]] = insertelement <8 x i32> [[R0]], i32 [[AB1]], i32 1
+; SLM-NEXT: [[R2:%.*]] = insertelement <8 x i32> [[R1]], i32 [[AB2]], i32 2
+; SLM-NEXT: [[R3:%.*]] = insertelement <8 x i32> [[R2]], i32 [[AB3]], i32 3
+; SLM-NEXT: [[R4:%.*]] = insertelement <8 x i32> [[R3]], i32 [[AB4]], i32 4
+; SLM-NEXT: [[R5:%.*]] = insertelement <8 x i32> [[R4]], i32 [[AB5]], i32 5
+; SLM-NEXT: [[R6:%.*]] = insertelement <8 x i32> [[R5]], i32 [[AB6]], i32 6
+; SLM-NEXT: [[R7:%.*]] = insertelement <8 x i32> [[R6]], i32 [[AB7]], i32 7
+; SLM-NEXT: ret <8 x i32> [[R7]]
+;
+; AVX-LABEL: @fptosi_fptoui(
+; AVX-NEXT: [[A0:%.*]] = extractelement <8 x float> [[A:%.*]], i32 0
+; AVX-NEXT: [[A1:%.*]] = extractelement <8 x float> [[A]], i32 1
+; AVX-NEXT: [[A2:%.*]] = extractelement <8 x float> [[A]], i32 2
+; AVX-NEXT: [[A3:%.*]] = extractelement <8 x float> [[A]], i32 3
+; AVX-NEXT: [[A4:%.*]] = extractelement <8 x float> [[A]], i32 4
+; AVX-NEXT: [[A5:%.*]] = extractelement <8 x float> [[A]], i32 5
+; AVX-NEXT: [[A6:%.*]] = extractelement <8 x float> [[A]], i32 6
+; AVX-NEXT: [[A7:%.*]] = extractelement <8 x float> [[A]], i32 7
+; AVX-NEXT: [[AB0:%.*]] = fptosi float [[A0]] to i32
+; AVX-NEXT: [[AB1:%.*]] = fptosi float [[A1]] to i32
+; AVX-NEXT: [[AB2:%.*]] = fptosi float [[A2]] to i32
+; AVX-NEXT: [[AB3:%.*]] = fptosi float [[A3]] to i32
+; AVX-NEXT: [[AB4:%.*]] = fptoui float [[A4]] to i32
+; AVX-NEXT: [[AB5:%.*]] = fptoui float [[A5]] to i32
+; AVX-NEXT: [[AB6:%.*]] = fptoui float [[A6]] to i32
+; AVX-NEXT: [[AB7:%.*]] = fptoui float [[A7]] to i32
+; AVX-NEXT: [[R0:%.*]] = insertelement <8 x i32> undef, i32 [[AB0]], i32 0
+; AVX-NEXT: [[R1:%.*]] = insertelement <8 x i32> [[R0]], i32 [[AB1]], i32 1
+; AVX-NEXT: [[R2:%.*]] = insertelement <8 x i32> [[R1]], i32 [[AB2]], i32 2
+; AVX-NEXT: [[R3:%.*]] = insertelement <8 x i32> [[R2]], i32 [[AB3]], i32 3
+; AVX-NEXT: [[R4:%.*]] = insertelement <8 x i32> [[R3]], i32 [[AB4]], i32 4
+; AVX-NEXT: [[R5:%.*]] = insertelement <8 x i32> [[R4]], i32 [[AB5]], i32 5
+; AVX-NEXT: [[R6:%.*]] = insertelement <8 x i32> [[R5]], i32 [[AB6]], i32 6
+; AVX-NEXT: [[R7:%.*]] = insertelement <8 x i32> [[R6]], i32 [[AB7]], i32 7
+; AVX-NEXT: ret <8 x i32> [[R7]]
+;
+; AVX512-LABEL: @fptosi_fptoui(
+; AVX512-NEXT: [[TMP1:%.*]] = fptosi <8 x float> [[A:%.*]] to <8 x i32>
+; AVX512-NEXT: [[TMP2:%.*]] = fptoui <8 x float> [[A]] to <8 x i32>
+; AVX512-NEXT: [[R7:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[TMP2]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+; AVX512-NEXT: ret <8 x i32> [[R7]]
 ;
   %a0 = extractelement <8 x float> %a, i32 0
   %a1 = extractelement <8 x float> %a, i32 1