[Legalize] Add legalizations for VECREDUCE_SEQ_FADD
Add Legalization support for VECREDUCE_SEQ_FADD, so that we don't need to
depend on ExpandReductionsPass.

Differential Revision: https://reviews.llvm.org/D90247
commit 7b7e236aab (parent 1e61e7c7e0)
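For orientation: a VECREDUCE_SEQ_FADD node takes the start value as operand 0 and the vector as operand 1, and must apply the additions strictly in element order, with no reassociation. A scalar C++ sketch of that semantics (illustrative only; the function below is not part of the patch):

// Ordered (sequential) FP add reduction: the accumulator is folded with each
// element in ascending index order, so rounding matches the scalar loop exactly.
float vecreduce_seq_fadd(float Acc, const float *Vec, unsigned NumElts) {
  for (unsigned I = 0; I < NumElts; ++I)
    Acc = Acc + Vec[I]; // no reassociation allowed
  return Acc;
}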
@@ -4447,6 +4447,9 @@ public:
   /// only the first Count elements of the vector are used.
   SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
 
+  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
+  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
+
   /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
   /// Returns true if the expansion was successful.
   bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

@@ -1165,6 +1165,10 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
     Action = TLI.getOperationAction(
         Node->getOpcode(), Node->getOperand(0).getValueType());
     break;
+  case ISD::VECREDUCE_SEQ_FADD:
+    Action = TLI.getOperationAction(
+        Node->getOpcode(), Node->getOperand(1).getValueType());
+    break;
   default:
     if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
       Action = TargetLowering::Legal;

@@ -140,6 +140,9 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
     case ISD::VECREDUCE_FMAX:
       R = SoftenFloatRes_VECREDUCE(N);
       break;
+    case ISD::VECREDUCE_SEQ_FADD:
+      R = SoftenFloatRes_VECREDUCE_SEQ(N);
+      break;
   }
 
   // If R is null, the sub-method took care of registering the result.

@@ -784,6 +787,10 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE(SDNode *N) {
   return SDValue();
 }
 
+SDValue DAGTypeLegalizer::SoftenFloatRes_VECREDUCE_SEQ(SDNode *N) {
+  ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+  return SDValue();
+}
 
 //===----------------------------------------------------------------------===//
 // Convert Float Operand to Integer

@@ -2254,6 +2261,9 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
     case ISD::VECREDUCE_FMAX:
       R = PromoteFloatRes_VECREDUCE(N);
       break;
+    case ISD::VECREDUCE_SEQ_FADD:
+      R = PromoteFloatRes_VECREDUCE_SEQ(N);
+      break;
   }
 
   if (R.getNode())

@@ -2494,6 +2504,11 @@ SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE(SDNode *N) {
   return SDValue();
 }
 
+SDValue DAGTypeLegalizer::PromoteFloatRes_VECREDUCE_SEQ(SDNode *N) {
+  ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+  return SDValue();
+}
+
 SDValue DAGTypeLegalizer::BitcastToInt_ATOMIC_SWAP(SDNode *N) {
   EVT VT = N->getValueType(0);
 

@@ -2608,6 +2623,9 @@ void DAGTypeLegalizer::SoftPromoteHalfResult(SDNode *N, unsigned ResNo) {
   case ISD::VECREDUCE_FMAX:
     R = SoftPromoteHalfRes_VECREDUCE(N);
     break;
+  case ISD::VECREDUCE_SEQ_FADD:
+    R = SoftPromoteHalfRes_VECREDUCE_SEQ(N);
+    break;
   }
 
   if (R.getNode())

@@ -2806,6 +2824,12 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE(SDNode *N) {
   return SDValue();
 }
 
+SDValue DAGTypeLegalizer::SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N) {
+  // Expand and soften.
+  ReplaceValueWith(SDValue(N, 0), TLI.expandVecReduceSeq(N, DAG));
+  return SDValue();
+}
+
 //===----------------------------------------------------------------------===//
 // Half Operand Soft Promotion
 //===----------------------------------------------------------------------===//

@@ -551,6 +551,7 @@ private:
   SDValue SoftenFloatRes_VAARG(SDNode *N);
   SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
   SDValue SoftenFloatRes_VECREDUCE(SDNode *N);
+  SDValue SoftenFloatRes_VECREDUCE_SEQ(SDNode *N);
 
   // Convert Float Operand to Integer.
   bool SoftenFloatOperand(SDNode *N, unsigned OpNo);

@@ -670,6 +671,7 @@ private:
   SDValue BitcastToInt_ATOMIC_SWAP(SDNode *N);
   SDValue PromoteFloatRes_XINT_TO_FP(SDNode *N);
   SDValue PromoteFloatRes_VECREDUCE(SDNode *N);
+  SDValue PromoteFloatRes_VECREDUCE_SEQ(SDNode *N);
 
   bool PromoteFloatOperand(SDNode *N, unsigned OpNo);
   SDValue PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo);

@@ -708,6 +710,7 @@ private:
   SDValue SoftPromoteHalfRes_XINT_TO_FP(SDNode *N);
   SDValue SoftPromoteHalfRes_UNDEF(SDNode *N);
   SDValue SoftPromoteHalfRes_VECREDUCE(SDNode *N);
+  SDValue SoftPromoteHalfRes_VECREDUCE_SEQ(SDNode *N);
 
   bool SoftPromoteHalfOperand(SDNode *N, unsigned OpNo);
   SDValue SoftPromoteHalfOp_BITCAST(SDNode *N);

@@ -774,6 +777,7 @@ private:
   SDValue ScalarizeVecOp_FP_ROUND(SDNode *N, unsigned OpNo);
   SDValue ScalarizeVecOp_STRICT_FP_ROUND(SDNode *N, unsigned OpNo);
   SDValue ScalarizeVecOp_VECREDUCE(SDNode *N);
+  SDValue ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N);
 
   //===--------------------------------------------------------------------===//
   // Vector Splitting Support: LegalizeVectorTypes.cpp

@@ -829,6 +833,7 @@ private:
   bool SplitVectorOperand(SDNode *N, unsigned OpNo);
   SDValue SplitVecOp_VSELECT(SDNode *N, unsigned OpNo);
   SDValue SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo);
+  SDValue SplitVecOp_VECREDUCE_SEQ(SDNode *N);
   SDValue SplitVecOp_UnaryOp(SDNode *N);
   SDValue SplitVecOp_TruncateHelper(SDNode *N);
 

@@ -915,6 +920,7 @@ private:
   SDValue WidenVecOp_Convert(SDNode *N);
   SDValue WidenVecOp_FCOPYSIGN(SDNode *N);
   SDValue WidenVecOp_VECREDUCE(SDNode *N);
+  SDValue WidenVecOp_VECREDUCE_SEQ(SDNode *N);
 
   /// Helper function to generate a set of operations to perform
   /// a vector operation for a wider type.

@@ -471,10 +471,6 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
                                               Node->getValueType(0), Scale);
     break;
   }
-  case ISD::VECREDUCE_SEQ_FADD:
-    Action = TLI.getOperationAction(Node->getOpcode(),
-                                    Node->getOperand(1).getValueType());
-    break;
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
   case ISD::VECREDUCE_ADD:

@@ -493,6 +489,10 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
     Action = TLI.getOperationAction(Node->getOpcode(),
                                     Node->getOperand(0).getValueType());
     break;
+  case ISD::VECREDUCE_SEQ_FADD:
+    Action = TLI.getOperationAction(Node->getOpcode(),
+                                    Node->getOperand(1).getValueType());
+    break;
   }
 
   LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));

@@ -874,6 +874,9 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
   case ISD::VECREDUCE_FMIN:
     Results.push_back(TLI.expandVecReduce(Node, DAG));
     return;
+  case ISD::VECREDUCE_SEQ_FADD:
+    Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
+    return;
   case ISD::SREM:
   case ISD::UREM:
     ExpandREM(Node, Results);

@@ -623,6 +623,9 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
     case ISD::VECREDUCE_FMIN:
       Res = ScalarizeVecOp_VECREDUCE(N);
       break;
+    case ISD::VECREDUCE_SEQ_FADD:
+      Res = ScalarizeVecOp_VECREDUCE_SEQ(N);
+      break;
     }
   }
 

@@ -803,6 +806,17 @@ SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE(SDNode *N) {
   return Res;
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecOp_VECREDUCE_SEQ(SDNode *N) {
+  SDValue AccOp = N->getOperand(0);
+  SDValue VecOp = N->getOperand(1);
+
+  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(N->getOpcode());
+
+  SDValue Op = GetScalarizedVector(VecOp);
+  return DAG.getNode(BaseOpc, SDLoc(N), N->getValueType(0),
+                     AccOp, Op, N->getFlags());
+}
+
 //===----------------------------------------------------------------------===//
 // Result Vector Splitting
 //===----------------------------------------------------------------------===//

@@ -2075,6 +2089,9 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
     case ISD::VECREDUCE_FMIN:
      Res = SplitVecOp_VECREDUCE(N, OpNo);
      break;
+    case ISD::VECREDUCE_SEQ_FADD:
+      Res = SplitVecOp_VECREDUCE_SEQ(N);
+      break;
     }
   }
 

@@ -2150,6 +2167,28 @@ SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE(SDNode *N, unsigned OpNo) {
   return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, N->getFlags());
 }
 
+SDValue DAGTypeLegalizer::SplitVecOp_VECREDUCE_SEQ(SDNode *N) {
+  EVT ResVT = N->getValueType(0);
+  SDValue Lo, Hi;
+  SDLoc dl(N);
+
+  SDValue AccOp = N->getOperand(0);
+  SDValue VecOp = N->getOperand(1);
+  SDNodeFlags Flags = N->getFlags();
+
+  EVT VecVT = VecOp.getValueType();
+  assert(VecVT.isVector() && "Can only split reduce vector operand");
+  GetSplitVector(VecOp, Lo, Hi);
+  EVT LoOpVT, HiOpVT;
+  std::tie(LoOpVT, HiOpVT) = DAG.GetSplitDestVTs(VecVT);
+
+  // Reduce low half.
+  SDValue Partial = DAG.getNode(N->getOpcode(), dl, ResVT, AccOp, Lo, Flags);
+
+  // Reduce high half, using low half result as initial value.
+  return DAG.getNode(N->getOpcode(), dl, ResVT, Partial, Hi, Flags);
+}
+
 SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
   // The result has a legal vector type, but the input needs splitting.
   EVT ResVT = N->getValueType(0);
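Note on the splitting strategy above: ordering is preserved by chaining the accumulator, so the low half is reduced with the incoming accumulator and the partial result then seeds the high half. A scalar C++ model of the same idea, assuming an 8-element vector split into two halves (the helper names are hypothetical):

// Scalar model of splitting an ordered reduction in two.
float reduceHalf(float Acc, const float *V, unsigned N) {
  for (unsigned I = 0; I < N; ++I)
    Acc = Acc + V[I]; // strict in-order adds
  return Acc;
}

float splitReduce8(float Acc, const float *V) {
  float Partial = reduceHalf(Acc, V, 4); // low half, seeded by Acc
  return reduceHalf(Partial, V + 4, 4);  // high half, seeded by Partial
}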
@@ -4318,6 +4357,9 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::VECREDUCE_FMIN:
     Res = WidenVecOp_VECREDUCE(N);
     break;
+  case ISD::VECREDUCE_SEQ_FADD:
+    Res = WidenVecOp_VECREDUCE_SEQ(N);
+    break;
   }
 
   // If Res is null, the sub-method took care of registering the result.

@@ -4757,8 +4799,9 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
   EVT ElemVT = OrigVT.getVectorElementType();
   SDNodeFlags Flags = N->getFlags();
 
-  SDValue NeutralElem = DAG.getNeutralElement(
-      ISD::getVecReduceBaseOpcode(N->getOpcode()), dl, ElemVT, Flags);
+  unsigned Opc = N->getOpcode();
+  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
+  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
   assert(NeutralElem && "Neutral element must exist");
 
   // Pad the vector with the neutral element.

@@ -4768,7 +4811,32 @@ SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE(SDNode *N) {
     Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
                      DAG.getVectorIdxConstant(Idx, dl));
 
-  return DAG.getNode(N->getOpcode(), dl, N->getValueType(0), Op, Flags);
+  return DAG.getNode(Opc, dl, N->getValueType(0), Op, Flags);
+}
+
+SDValue DAGTypeLegalizer::WidenVecOp_VECREDUCE_SEQ(SDNode *N) {
+  SDLoc dl(N);
+  SDValue AccOp = N->getOperand(0);
+  SDValue VecOp = N->getOperand(1);
+  SDValue Op = GetWidenedVector(VecOp);
+
+  EVT OrigVT = VecOp.getValueType();
+  EVT WideVT = Op.getValueType();
+  EVT ElemVT = OrigVT.getVectorElementType();
+  SDNodeFlags Flags = N->getFlags();
+
+  unsigned Opc = N->getOpcode();
+  unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Opc);
+  SDValue NeutralElem = DAG.getNeutralElement(BaseOpc, dl, ElemVT, Flags);
+
+  // Pad the vector with the neutral element.
+  unsigned OrigElts = OrigVT.getVectorNumElements();
+  unsigned WideElts = WideVT.getVectorNumElements();
+  for (unsigned Idx = OrigElts; Idx < WideElts; Idx++)
+    Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, WideVT, Op, NeutralElem,
+                     DAG.getVectorIdxConstant(Idx, dl));
+
+  return DAG.getNode(Opc, dl, N->getValueType(0), AccOp, Op, Flags);
 }
 
 SDValue DAGTypeLegalizer::WidenVecOp_VSELECT(SDNode *N) {
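Note on the widening path above: the extra lanes of the widened vector are filled with the neutral element of the base opcode, which for FADD is -0.0, so they do not change the ordered result. A small standalone C++ check of that arithmetic fact (my own example, not part of the patch):

#include <cassert>
#include <cmath>

int main() {
  // -0.0 is the neutral element for fadd: x + (-0.0) == x for every x,
  // and it preserves the sign of zero, so padded lanes leave the
  // sequential sum unchanged.
  const float Pad = -0.0f;
  const float Vals[] = {1.5f, -0.0f, 0.0f, INFINITY};
  for (float X : Vals) {
    float Y = X + Pad;
    assert(Y == X && std::signbit(Y) == std::signbit(X));
  }
  return 0;
}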
@@ -338,6 +338,7 @@ ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
   default:
     llvm_unreachable("Expected VECREDUCE opcode");
   case ISD::VECREDUCE_FADD:
+  case ISD::VECREDUCE_SEQ_FADD:
     return ISD::FADD;
   case ISD::VECREDUCE_FMUL:
     return ISD::FMUL;

@@ -8030,6 +8030,28 @@ SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
   return Res;
 }
 
+SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const {
+  SDLoc dl(Node);
+  SDValue AccOp = Node->getOperand(0);
+  SDValue VecOp = Node->getOperand(1);
+  SDNodeFlags Flags = Node->getFlags();
+
+  EVT VT = VecOp.getValueType();
+  EVT EltVT = VT.getVectorElementType();
+  unsigned NumElts = VT.getVectorNumElements();
+
+  SmallVector<SDValue, 8> Ops;
+  DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);
+
+  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
+
+  SDValue Res = AccOp;
+  for (unsigned i = 0; i < NumElts; i++)
+    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);
+
+  return Res;
+}
+
 bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                                SelectionDAG &DAG) const {
   EVT VT = Node->getValueType(0);

@@ -733,6 +733,7 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
     setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
     setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
+    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
   }
 
   // Most targets ignore the @llvm.prefetch intrinsic.
@@ -777,14 +777,6 @@ public:
     return !useSVEForFixedLengthVectors();
   }
 
-  // FIXME: Move useSVEForFixedLengthVectors*() back to private scope once
-  // reduction legalization is complete.
-  bool useSVEForFixedLengthVectors() const;
-  // Normally SVE is only used for byte size vectors that do not fit within a
-  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
-  // used for 64bit and 128bit vectors as well.
-  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
-
 private:
   /// Keep a pointer to the AArch64Subtarget around so that we can
   /// make the right decision when generating code for different targets.

@@ -1012,6 +1004,12 @@ private:
 
   bool shouldLocalize(const MachineInstr &MI,
                       const TargetTransformInfo *TTI) const override;
+
+  bool useSVEForFixedLengthVectors() const;
+  // Normally SVE is only used for byte size vectors that do not fit within a
+  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
+  // used for 64bit and 128bit vectors as well.
+  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
 };
 
 namespace AArch64 {

@@ -223,17 +223,8 @@ public:
 
   bool shouldExpandReduction(const IntrinsicInst *II) const {
     switch (II->getIntrinsicID()) {
-    case Intrinsic::vector_reduce_fadd: {
-      Value *VecOp = II->getArgOperand(1);
-      EVT VT = TLI->getValueType(getDataLayout(), VecOp->getType());
-      if (ST->hasSVE() &&
-          TLI->useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
-        return false;
-
-      return !II->getFastMathFlags().allowReassoc();
-    }
     case Intrinsic::vector_reduce_fmul:
-      // We don't have legalization support for ordered FP reductions.
+      // We don't have legalization support for ordered FMUL reductions.
       return !II->getFastMathFlags().allowReassoc();
 
     default:

@@ -195,9 +195,8 @@ public:
 
   bool shouldExpandReduction(const IntrinsicInst *II) const {
     switch (II->getIntrinsicID()) {
-    case Intrinsic::vector_reduce_fadd:
     case Intrinsic::vector_reduce_fmul:
-      // We don't have legalization support for ordered FP reductions.
+      // We don't have legalization support for ordered FMUL reductions.
      return !II->getFastMathFlags().allowReassoc();
     default:
       // Don't expand anything else, let legalization deal with it.

@@ -63,8 +63,13 @@ define half @fadda_v32f16(half %start, <32 x half>* %a) #0 {
 ; VBITS_GE_512-NEXT: ret
 
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-COUNT-32: fadd
-; VBITS_EQ_256: ret
+; VBITS_EQ_256: add x8, x0, #32
+; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_EQ_256-DAG: ld1h { [[LO:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1h { [[HI:z[0-9]+]].h }, [[PG]]/z, [x8]
+; VBITS_EQ_256-NEXT: fadda h0, [[PG]], h0, [[LO]].h
+; VBITS_EQ_256-NEXT: fadda h0, [[PG]], h0, [[HI]].h
+; VBITS_EQ_256-NEXT: ret
   %op = load <32 x half>, <32 x half>* %a
   %res = call half @llvm.vector.reduce.fadd.v32f16(half %start, <32 x half> %op)
   ret half %res

@@ -131,8 +136,13 @@ define float @fadda_v16f32(float %start, <16 x float>* %a) #0 {
 ; VBITS_GE_512-NEXT: ret
 
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-COUNT-16: fadd
-; VBITS_EQ_256: ret
+; VBITS_EQ_256: add x8, x0, #32
+; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_EQ_256-DAG: ld1w { [[LO:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1w { [[HI:z[0-9]+]].s }, [[PG]]/z, [x8]
+; VBITS_EQ_256-NEXT: fadda s0, [[PG]], s0, [[LO]].s
+; VBITS_EQ_256-NEXT: fadda s0, [[PG]], s0, [[HI]].s
+; VBITS_EQ_256-NEXT: ret
   %op = load <16 x float>, <16 x float>* %a
   %res = call float @llvm.vector.reduce.fadd.v16f32(float %start, <16 x float> %op)
   ret float %res

@@ -199,8 +209,13 @@ define double @fadda_v8f64(double %start, <8 x double>* %a) #0 {
 ; VBITS_GE_512-NEXT: ret
 
 ; Ensure sensible type legalisation.
-; VBITS_EQ_256-COUNT-8: fadd
-; VBITS_EQ_256: ret
+; VBITS_EQ_256: add x8, x0, #32
+; VBITS_EQ_256-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_EQ_256-DAG: ld1d { [[LO:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1d { [[HI:z[0-9]+]].d }, [[PG]]/z, [x8]
+; VBITS_EQ_256-NEXT: fadda d0, [[PG]], d0, [[LO]].d
+; VBITS_EQ_256-NEXT: fadda d0, [[PG]], d0, [[HI]].d
+; VBITS_EQ_256-NEXT: ret
   %op = load <8 x double>, <8 x double>* %a
   %res = call double @llvm.vector.reduce.fadd.v8f64(double %start, <8 x double> %op)
   ret double %res

@@ -108,9 +108,9 @@ define float @test_v3f32(<3 x float> %a, float %s) nounwind {
 define float @test_v3f32_neutral(<3 x float> %a) nounwind {
 ; CHECK-LABEL: test_v3f32_neutral:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: faddp s1, v0.2s
-; CHECK-NEXT: mov s0, v0.s[2]
-; CHECK-NEXT: fadd s0, s1, s0
+; CHECK-NEXT: mov s1, v0.s[2]
+; CHECK-NEXT: faddp s0, v0.2s
+; CHECK-NEXT: fadd s0, s0, s1
 ; CHECK-NEXT: ret
   %b = call float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
   ret float %b

@@ -173,34 +173,34 @@ define fp128 @test_v2f128_neutral(<2 x fp128> %a) nounwind {
 define float @test_v16f32(<16 x float> %a, float %s) nounwind {
 ; CHECK-LABEL: test_v16f32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: fadd s4, s4, s0
-; CHECK-NEXT: mov s5, v0.s[1]
-; CHECK-NEXT: fadd s4, s4, s5
-; CHECK-NEXT: mov s5, v0.s[2]
-; CHECK-NEXT: mov s0, v0.s[3]
-; CHECK-NEXT: fadd s4, s4, s5
+; CHECK-NEXT: mov s22, v0.s[3]
+; CHECK-NEXT: mov s23, v0.s[2]
+; CHECK-NEXT: mov s24, v0.s[1]
 ; CHECK-NEXT: fadd s0, s4, s0
-; CHECK-NEXT: mov s5, v1.s[1]
+; CHECK-NEXT: fadd s0, s0, s24
+; CHECK-NEXT: fadd s0, s0, s23
+; CHECK-NEXT: fadd s0, s0, s22
+; CHECK-NEXT: mov s21, v1.s[1]
 ; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s4, v1.s[2]
-; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v1.s[3]
-; CHECK-NEXT: fadd s0, s0, s4
-; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s5, v2.s[1]
+; CHECK-NEXT: mov s20, v1.s[2]
+; CHECK-NEXT: fadd s0, s0, s21
+; CHECK-NEXT: mov s19, v1.s[3]
+; CHECK-NEXT: fadd s0, s0, s20
+; CHECK-NEXT: fadd s0, s0, s19
+; CHECK-NEXT: mov s18, v2.s[1]
 ; CHECK-NEXT: fadd s0, s0, s2
-; CHECK-NEXT: mov s4, v2.s[2]
-; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v2.s[3]
-; CHECK-NEXT: fadd s0, s0, s4
-; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s2, v3.s[1]
+; CHECK-NEXT: mov s17, v2.s[2]
+; CHECK-NEXT: fadd s0, s0, s18
+; CHECK-NEXT: mov s16, v2.s[3]
+; CHECK-NEXT: fadd s0, s0, s17
+; CHECK-NEXT: fadd s0, s0, s16
+; CHECK-NEXT: mov s7, v3.s[1]
 ; CHECK-NEXT: fadd s0, s0, s3
-; CHECK-NEXT: mov s5, v3.s[2]
-; CHECK-NEXT: fadd s0, s0, s2
+; CHECK-NEXT: mov s6, v3.s[2]
+; CHECK-NEXT: fadd s0, s0, s7
+; CHECK-NEXT: mov s5, v3.s[3]
+; CHECK-NEXT: fadd s0, s0, s6
 ; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v3.s[3]
-; CHECK-NEXT: fadd s0, s0, s1
 ; CHECK-NEXT: ret
   %b = call float @llvm.vector.reduce.fadd.f32.v16f32(float %s, <16 x float> %a)
   ret float %b

@@ -209,32 +209,32 @@ define float @test_v16f32(<16 x float> %a, float %s) nounwind {
 define float @test_v16f32_neutral(<16 x float> %a) nounwind {
 ; CHECK-LABEL: test_v16f32_neutral:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: faddp s4, v0.2s
-; CHECK-NEXT: mov s5, v0.s[2]
-; CHECK-NEXT: mov s0, v0.s[3]
-; CHECK-NEXT: fadd s4, s4, s5
-; CHECK-NEXT: fadd s0, s4, s0
-; CHECK-NEXT: mov s5, v1.s[1]
+; CHECK-NEXT: mov s21, v0.s[3]
+; CHECK-NEXT: mov s22, v0.s[2]
+; CHECK-NEXT: faddp s0, v0.2s
+; CHECK-NEXT: fadd s0, s0, s22
+; CHECK-NEXT: fadd s0, s0, s21
+; CHECK-NEXT: mov s20, v1.s[1]
 ; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s4, v1.s[2]
-; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v1.s[3]
-; CHECK-NEXT: fadd s0, s0, s4
-; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s5, v2.s[1]
+; CHECK-NEXT: mov s19, v1.s[2]
+; CHECK-NEXT: fadd s0, s0, s20
+; CHECK-NEXT: mov s18, v1.s[3]
+; CHECK-NEXT: fadd s0, s0, s19
+; CHECK-NEXT: fadd s0, s0, s18
+; CHECK-NEXT: mov s17, v2.s[1]
 ; CHECK-NEXT: fadd s0, s0, s2
-; CHECK-NEXT: mov s4, v2.s[2]
-; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v2.s[3]
-; CHECK-NEXT: fadd s0, s0, s4
-; CHECK-NEXT: fadd s0, s0, s1
-; CHECK-NEXT: mov s2, v3.s[1]
+; CHECK-NEXT: mov s16, v2.s[2]
+; CHECK-NEXT: fadd s0, s0, s17
+; CHECK-NEXT: mov s7, v2.s[3]
+; CHECK-NEXT: fadd s0, s0, s16
+; CHECK-NEXT: fadd s0, s0, s7
+; CHECK-NEXT: mov s6, v3.s[1]
 ; CHECK-NEXT: fadd s0, s0, s3
 ; CHECK-NEXT: mov s5, v3.s[2]
-; CHECK-NEXT: fadd s0, s0, s2
+; CHECK-NEXT: fadd s0, s0, s6
+; CHECK-NEXT: mov s4, v3.s[3]
 ; CHECK-NEXT: fadd s0, s0, s5
-; CHECK-NEXT: mov s1, v3.s[3]
-; CHECK-NEXT: fadd s0, s0, s1
+; CHECK-NEXT: fadd s0, s0, s4
 ; CHECK-NEXT: ret
   %b = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
   ret float %b