
Allow targets to specify the type of the RHS of a shift parameterized on the type of the LHS.

llvm-svn: 126518
Owen Anderson 2011-02-25 21:41:48 +00:00
parent ce1bb42907
commit bd26993873
23 changed files with 295 additions and 278 deletions
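In short, the commit replaces the single per-target ShiftAmountTy field (configured with setShiftAmountType) with a virtual hook, virtual MVT getShiftAmountTy(EVT LHSTy) const, so the shift-amount (RHS) type can depend on the type of the value being shifted; call sites now pass the LHS type, e.g. TLI.getShiftAmountTy(N0.getValueType()). The following is a minimal sketch of how a backend might use the new hook. It is not part of this commit: MyTargetLowering and its type policy are hypothetical, and the header paths and constructor signature are assumed to match this LLVM revision (they mirror the in-tree targets touched below).

// Sketch only: a hypothetical target overriding the new hook.
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

namespace llvm {
  class MyTargetLowering : public TargetLowering {
  public:
    explicit MyTargetLowering(TargetMachine &TM)
      : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
      // setShiftAmountType(MVT::i32);  // removed by this commit
    }

    // The RHS (shift-amount) type is now chosen per LHS type instead of
    // being a single per-target constant.
    virtual MVT getShiftAmountTy(EVT LHSTy) const {
      // Hypothetical policy: 64-bit shifts take an i64 amount, all others i32.
      return LHSTy.isInteger() && LHSTy.getSizeInBits() > 32 ? MVT::i64
                                                             : MVT::i32;
    }
  };
}

A caller in the DAG combiner or legalizer then queries the hook with the shifted operand's type, as the hunks below show, e.g. DAG.getConstant(ShAmt, TLI.getShiftAmountTy(N0.getValueType())).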

View File

@ -111,7 +111,7 @@ public:
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
MVT getPointerTy() const { return PointerTy; }
MVT getShiftAmountTy() const { return ShiftAmountTy; }
virtual MVT getShiftAmountTy(EVT LHSTy) const;
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
@ -210,7 +210,7 @@ public:
/// ValueTypeActions - For each value type, keep a LegalizeAction enum
/// that indicates how instruction selection should deal with the type.
uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
LegalizeAction getExtendedTypeAction(EVT VT) const {
// Handle non-vector integers.
if (!VT.isVector()) {
@ -260,17 +260,17 @@ public:
ValueTypeActionImpl() {
std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
}
LegalizeAction getTypeAction(EVT VT) const {
if (!VT.isExtended())
return getTypeAction(VT.getSimpleVT());
return getExtendedTypeAction(VT);
}
LegalizeAction getTypeAction(MVT VT) const {
return (LegalizeAction)ValueTypeActions[VT.SimpleTy];
}
void setTypeAction(EVT VT, LegalizeAction Action) {
unsigned I = VT.getSimpleVT().SimpleTy;
ValueTypeActions[I] = Action;
@ -291,7 +291,7 @@ public:
LegalizeAction getTypeAction(MVT VT) const {
return ValueTypeActions.getTypeAction(VT);
}
/// getTypeToTransformTo - For types supported by the target, this is an
/// identity function. For types that must be promoted to larger types, this
/// returns the larger type to promote to. For integer types that are larger
@ -324,7 +324,7 @@ public:
EVT NVT = VT.getRoundIntegerType(Context);
if (NVT == VT) // Size is a power of two - expand to half the size.
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
// Promote to a power of two size, avoiding multi-step promotion.
return getTypeAction(NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
@ -997,10 +997,6 @@ public:
//
protected:
/// setShiftAmountType - Describe the type that should be used for shift
/// amounts. This type defaults to the pointer type.
void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; }
/// setBooleanContents - Specify how the target extends the result of a
/// boolean value from i1 to a wider type. See getBooleanContents.
void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
@ -1047,12 +1043,12 @@ protected:
/// SelectIsExpensive - Tells the code generator not to expand operations
/// into sequences that use the select operations if possible.
void setSelectIsExpensive(bool isExpensive = true) {
SelectIsExpensive = isExpensive;
}
/// JumpIsExpensive - Tells the code generator not to expand sequence of
/// operations into a seperate sequences that increases the amount of
/// flow control.
void setJumpIsExpensive(bool isExpensive = true) {
JumpIsExpensive = isExpensive;
@ -1369,7 +1365,7 @@ public:
CW_Good = 1, // Good weight.
CW_Better = 2, // Better weight.
CW_Best = 3, // Best weight.
// Well-known weights.
CW_SpecificReg = CW_Okay, // Specific register operands.
CW_Register = CW_Good, // Register operands.
@ -1422,21 +1418,21 @@ public:
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
};
typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
/// ParseConstraints - Split up the constraint string from the inline
/// assembly value into the specific constraints and their prefixes,
/// and also tie in the associated operand values.
/// If this returns an empty vector, and if the constraint string itself
/// isn't empty, there was an error parsing.
virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
/// Examine constraint type and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
virtual ConstraintWeight getMultipleConstraintMatchWeight(
AsmOperandInfo &info, int maIndex) const;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
virtual ConstraintWeight getSingleConstraintMatchWeight(
@ -1446,7 +1442,7 @@ public:
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
/// being passed in is available, it can be passed in as Op, otherwise an
/// empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
SelectionDAG *DAG = 0) const;
@ -1660,10 +1656,6 @@ private:
/// llvm.longjmp. Defaults to false.
bool UseUnderscoreLongJmp;
/// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
/// PointerTy is.
MVT ShiftAmountTy;
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;

View File

@ -279,8 +279,8 @@ namespace {
/// getShiftAmountTy - Returns a type large enough to hold any valid
/// shift amount - before type legalization these can be huge.
EVT getShiftAmountTy() {
return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
EVT getShiftAmountTy(EVT LHSTy) {
return LegalTypes ? TLI.getShiftAmountTy(LHSTy) : TLI.getPointerTy();
}
/// isTypeLegal - This method returns true if we are running before type
@ -670,7 +670,7 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
: ISD::EXTLOAD)
: LD->getExtensionType();
Replace = true;
@ -894,7 +894,7 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT MemVT = LD->getMemoryVT();
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
: ISD::EXTLOAD)
: LD->getExtensionType();
SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
@ -1521,7 +1521,7 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(DebugLoc DL, const TargetLowering &TLI, EVT VT,
SelectionDAG &DAG, bool LegalOperations) {
if (!VT.isVector()) {
return DAG.getConstant(0, VT);
} else if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
@ -1647,7 +1647,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
if (N1C && N1C->getAPIntValue().isPowerOf2())
return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
DAG.getConstant(N1C->getAPIntValue().logBase2(),
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) {
unsigned Log2Val = (-N1C->getAPIntValue()).logBase2();
@ -1656,7 +1656,8 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
DAG.getConstant(0, VT),
DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
DAG.getConstant(Log2Val, getShiftAmountTy())));
DAG.getConstant(Log2Val,
getShiftAmountTy(N0.getValueType()))));
}
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
if (N1C && N0.getOpcode() == ISD::SHL &&
@ -1753,18 +1754,18 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
// Splat the sign bit into the register
SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
DAG.getConstant(VT.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
AddToWorkList(SGN.getNode());
// Add (N0 < 0) ? abs2 - 1 : 0;
SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN,
DAG.getConstant(VT.getSizeInBits() - lg2,
getShiftAmountTy()));
getShiftAmountTy(SGN.getValueType())));
SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL);
AddToWorkList(SRL.getNode());
AddToWorkList(ADD.getNode()); // Divide by pow2
SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD,
DAG.getConstant(lg2, getShiftAmountTy()));
DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType())));
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
@ -1814,7 +1815,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
if (N1C && N1C->getAPIntValue().isPowerOf2())
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
DAG.getConstant(N1C->getAPIntValue().logBase2(),
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
if (N1.getOpcode() == ISD::SHL) {
if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
@ -1955,7 +1956,7 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
if (N1C && N1C->getAPIntValue() == 1)
return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0,
DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
// fold (mulhs x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
return DAG.getConstant(0, VT);
@ -1971,11 +1972,11 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, getShiftAmountTy()));
DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
return SDValue();
}
@ -2007,11 +2008,11 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
DAG.getConstant(SimpleSize, getShiftAmountTy()));
DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
}
}
return SDValue();
}
@ -2090,14 +2091,14 @@ SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, getShiftAmountTy()));
DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part as N0.
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
@ -2107,7 +2108,7 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
EVT VT = N->getValueType(0);
DebugLoc DL = N->getDebugLoc();
// If the type twice as wide is legal, transform the mulhu to a wider multiply
// plus a shift.
if (VT.isSimple() && !VT.isVector()) {
@ -2120,14 +2121,14 @@ SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
// Compute the high part as N1.
Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
DAG.getConstant(SimpleSize, getShiftAmountTy()));
DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
// Compute the low part as N0.
Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
return CombineTo(N, Lo, Hi);
}
}
return SDValue();
}
@ -3004,7 +3005,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
N0.getOpcode() == ISD::SIGN_EXTEND) &&
N0.getOperand(0).getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
uint64_t c1 =
cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
EVT InnerShiftVT = N0.getOperand(0).getValueType();
@ -3133,7 +3134,8 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
TLI.isTruncateFree(VT, TruncVT)) {
SDValue Amt = DAG.getConstant(ShiftAmt, getShiftAmountTy());
SDValue Amt = DAG.getConstant(ShiftAmt,
getShiftAmountTy(N0.getOperand(0).getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
N0.getOperand(0), Amt);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
@ -3180,7 +3182,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
LargeShiftAmt->getZExtValue()) {
SDValue Amt =
DAG.getConstant(LargeShiftAmt->getZExtValue() + N1C->getZExtValue(),
getShiftAmountTy());
getShiftAmountTy(N0.getOperand(0).getOperand(0).getValueType()));
SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), LargeVT,
N0.getOperand(0).getOperand(0), Amt);
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, SRA);
@ -3245,7 +3247,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
N0.getOperand(0).getOpcode() == ISD::SRL &&
isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
uint64_t c1 =
cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
uint64_t c2 = N1C->getZExtValue();
EVT InnerShiftVT = N0.getOperand(0).getValueType();
@ -3256,7 +3258,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (c1 + c2 >= InnerShiftSize)
return DAG.getConstant(0, VT);
return DAG.getNode(ISD::TRUNCATE, N0->getDebugLoc(), VT,
DAG.getNode(ISD::SRL, N0->getDebugLoc(), InnerShiftVT,
N0.getOperand(0)->getOperand(0),
DAG.getConstant(c1 + c2, ShiftCountVT)));
}
@ -3320,7 +3322,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (ShAmt) {
Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
DAG.getConstant(ShAmt, getShiftAmountTy()));
DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
AddToWorkList(Op.getNode());
}
@ -4025,11 +4027,11 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
}
DebugLoc DL = N->getDebugLoc();
// Ensure that the shift amount is wide enough for the shifted value.
if (VT.getSizeInBits() >= 256)
ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);
return DAG.getNode(N0.getOpcode(), DL, VT,
DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
ShAmt);
@ -4278,12 +4280,12 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
return SDValue();
unsigned EVTBits = ExtVT.getSizeInBits();
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (!ExtVT.isRound())
return SDValue();
unsigned ShAmt = 0;
if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
@ -4298,7 +4300,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// At this point, we must have a load or else we can't do the transform.
if (!isa<LoadSDNode>(N0)) return SDValue();
// If the shift amount is larger than the input type then we're not
// accessing any of the loaded bytes. If the load was a zextload/extload
// then the result of the shift+trunc is zero/undef (handled elsewhere).
@ -4319,18 +4321,18 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
N0 = N0.getOperand(0);
}
}
// If we haven't found a load, we can't narrow it. Don't transform one with
// multiple uses, this would require adding a new load.
if (!isa<LoadSDNode>(N0) || !N0.hasOneUse() ||
// Don't change the width of a volatile load.
cast<LoadSDNode>(N0)->isVolatile())
return SDValue();
// Verify that we are actually reducing a load width here.
if (cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() < EVTBits)
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT PtrType = N0.getOperand(1).getValueType();
@ -4368,7 +4370,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// Shift the result left, if we've swallowed a left shift.
SDValue Result = Load;
if (ShLeftAmt != 0) {
EVT ShImmTy = getShiftAmountTy();
EVT ShImmTy = getShiftAmountTy(Result.getValueType());
if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt))
ShImmTy = VT;
Result = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT,
@ -5984,7 +5986,8 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
// shifted by ByteShift and truncated down to NumBytes.
if (ByteShift)
IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
DAG.getConstant(ByteShift*8, DC->getShiftAmountTy()));
DAG.getConstant(ByteShift*8,
DC->getShiftAmountTy(IVal.getValueType())));
// Figure out the offset for the store and the alignment of the access.
unsigned StOffset;
@ -6399,7 +6402,7 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
EVT VT = InVec.getValueType();
// If we can't generate a legal BUILD_VECTOR, exit
if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
return SDValue();
@ -7107,7 +7110,8 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
unsigned ShCtV = N2C->getAPIntValue().logBase2();
ShCtV = XType.getSizeInBits()-ShCtV-1;
SDValue ShCt = DAG.getConstant(ShCtV, getShiftAmountTy());
SDValue ShCt = DAG.getConstant(ShCtV,
getShiftAmountTy(N0.getValueType()));
SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(),
XType, N0, ShCt);
AddToWorkList(Shift.getNode());
@ -7123,7 +7127,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(),
XType, N0,
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
AddToWorkList(Shift.getNode());
if (XType.bitsGT(AType)) {
@ -7151,13 +7155,15 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// Shift the tested bit over the sign bit.
APInt AndMask = ConstAndRHS->getAPIntValue();
SDValue ShlAmt =
DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
DAG.getConstant(AndMask.countLeadingZeros(),
getShiftAmountTy(AndLHS.getValueType()));
SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
// Now arithmetic right shift it all the way over, so the result is either
// all-ones, or zero.
SDValue ShrAmt =
DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
DAG.getConstant(AndMask.getBitWidth()-1,
getShiftAmountTy(Shl.getValueType()));
SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
@ -7201,7 +7207,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// shl setcc result by log2 n2c
return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
DAG.getConstant(N2C->getAPIntValue().logBase2(),
getShiftAmountTy()));
getShiftAmountTy(Temp.getValueType())));
}
// Check to see if this is the equivalent of setcc
@ -7224,7 +7230,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0);
return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
DAG.getConstant(Log2_32(XType.getSizeInBits()),
getShiftAmountTy()));
getShiftAmountTy(Ctlz.getValueType())));
}
// fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
@ -7234,13 +7240,13 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
return DAG.getNode(ISD::SRL, DL, XType,
DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(XType)));
}
// fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0,
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
}
}
@ -7267,7 +7273,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
N0,
DAG.getConstant(XType.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(N0.getValueType())));
SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
XType, N0, Shift);
AddToWorkList(Shift.getNode());

View File

@ -87,7 +87,7 @@ class SelectionDAGLegalize {
// If someone requests legalization of the new node, return itself.
if (From != To)
LegalizedNodes.insert(std::make_pair(To, To));
// Transfer SDDbgValues.
DAG.TransferDbgValues(From, To);
}
@ -498,7 +498,8 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
int IncrementSize = NumBits / 8;
// Divide the stored value in two parts.
SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
SDValue ShiftAmount = DAG.getConstant(NumBits,
TLI.getShiftAmountTy(Val.getValueType()));
SDValue Lo = Val;
SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
@ -645,7 +646,8 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
}
// aggregate the two parts
SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
SDValue ShiftAmount = DAG.getConstant(NumBits,
TLI.getShiftAmountTy(Hi.getValueType()));
SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
@ -1264,7 +1266,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Move the top bits to the right place.
Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
DAG.getConstant(RoundWidth,
TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@ -1293,7 +1296,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Move the top bits to the right place.
Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
DAG.getConstant(ExtraWidth,
TLI.getShiftAmountTy(Hi.getValueType())));
// Join the hi and lo parts.
Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
@ -1482,7 +1486,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
DAG.getConstant(RoundWidth,
TLI.getShiftAmountTy(Tmp3.getValueType())));
Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2,
ST->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, isVolatile, isNonTemporal,
@ -1492,7 +1497,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
// Store the top RoundWidth bits.
Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
DAG.getConstant(ExtraWidth,
TLI.getShiftAmountTy(Tmp3.getValueType())));
Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getPointerInfo(),
RoundVT, isVolatile, isNonTemporal, Alignment);
@ -1727,7 +1733,8 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
if (BitShift)
SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
DAG.getConstant(BitShift,TLI.getShiftAmountTy()));
DAG.getConstant(BitShift,
TLI.getShiftAmountTy(SignBit.getValueType())));
}
}
// Now get the sign bit proper, by seeing whether the value is negative.
@ -2207,7 +2214,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
if (!isSigned) {
SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
SDValue ShiftConst = DAG.getConstant(1, TLI.getShiftAmountTy());
SDValue ShiftConst =
DAG.getConstant(1, TLI.getShiftAmountTy(Op0.getValueType()));
SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
SDValue AndConst = DAG.getConstant(1, MVT::i64);
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
@ -2226,7 +2234,6 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
}
// Otherwise, implement the fully general conversion.
EVT SHVT = TLI.getShiftAmountTy();
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
@ -2241,6 +2248,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
ISD::SETUGE);
SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
EVT SHVT = TLI.getShiftAmountTy(Sel2.getValueType());
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
DAG.getConstant(32, SHVT));
@ -2387,7 +2395,7 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
///
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
EVT VT = Op.getValueType();
EVT SHVT = TLI.getShiftAmountTy();
EVT SHVT = TLI.getShiftAmountTy(VT);
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT().SimpleTy) {
default: assert(0 && "Unhandled Expand type in BSWAP!");
@ -2450,7 +2458,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
default: assert(0 && "Cannot expand this yet!");
case ISD::CTPOP: {
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy();
EVT ShVT = TLI.getShiftAmountTy(VT);
unsigned Len = VT.getSizeInBits();
assert(VT.isInteger() && Len <= 128 && Len % 8 == 0 &&
@ -2487,7 +2495,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
Op = DAG.getNode(ISD::SRL, dl, VT,
DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
DAG.getConstant(Len - 8, ShVT));
return Op;
}
case ISD::CTLZ: {
@ -2501,7 +2509,7 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
//
// but see also: http://www.hackersdelight.org/HDcode/nlz.cc
EVT VT = Op.getValueType();
EVT ShVT = TLI.getShiftAmountTy();
EVT ShVT = TLI.getShiftAmountTy(VT);
unsigned len = VT.getSizeInBits();
for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
@ -2737,7 +2745,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
// SAR. However, it is doubtful that any exist.
EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
EVT VT = Node->getValueType(0);
EVT ShiftAmountTy = TLI.getShiftAmountTy();
EVT ShiftAmountTy = TLI.getShiftAmountTy(VT);
if (VT.isVector())
ShiftAmountTy = VT;
unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
@ -2901,7 +2909,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
// 1 -> Hi
Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
DAG.getConstant(OpTy.getSizeInBits()/2,
TLI.getShiftAmountTy()));
TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
} else {
// 0 -> Lo
@ -3260,7 +3268,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
SDValue Ret = ExpandLibCall(LC, Node, isSigned);
BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Ret);
TopHalf = DAG.getNode(ISD::SRL, dl, Ret.getValueType(), Ret,
@ -3268,7 +3276,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, TopHalf);
}
if (isSigned) {
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
TLI.getShiftAmountTy(BottomHalf.getValueType()));
Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
ISD::SETNE);
@ -3286,7 +3295,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
DAG.getConstant(PairTy.getSizeInBits()/2,
TLI.getShiftAmountTy()));
TLI.getShiftAmountTy(PairTy)));
Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
break;
}
@ -3464,7 +3473,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
DAG.getConstant(DiffBits, TLI.getShiftAmountTy()));
DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
Results.push_back(Tmp1);
break;
}

View File

@ -177,25 +177,27 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
// First get the sign bit of second operand.
SDValue SignBit = DAG.getNode(ISD::SHL, dl, RVT, DAG.getConstant(1, RVT),
DAG.getConstant(RSize - 1,
TLI.getShiftAmountTy()));
TLI.getShiftAmountTy(RVT)));
SignBit = DAG.getNode(ISD::AND, dl, RVT, RHS, SignBit);
// Shift right or sign-extend it if the two operands have different types.
int SizeDiff = RVT.getSizeInBits() - LVT.getSizeInBits();
if (SizeDiff > 0) {
SignBit = DAG.getNode(ISD::SRL, dl, RVT, SignBit,
DAG.getConstant(SizeDiff, TLI.getShiftAmountTy()));
DAG.getConstant(SizeDiff,
TLI.getShiftAmountTy(SignBit.getValueType())));
SignBit = DAG.getNode(ISD::TRUNCATE, dl, LVT, SignBit);
} else if (SizeDiff < 0) {
SignBit = DAG.getNode(ISD::ANY_EXTEND, dl, LVT, SignBit);
SignBit = DAG.getNode(ISD::SHL, dl, LVT, SignBit,
DAG.getConstant(-SizeDiff, TLI.getShiftAmountTy()));
DAG.getConstant(-SizeDiff,
TLI.getShiftAmountTy(SignBit.getValueType())));
}
// Clear the sign bit of the first operand.
SDValue Mask = DAG.getNode(ISD::SHL, dl, LVT, DAG.getConstant(1, LVT),
DAG.getConstant(LSize - 1,
TLI.getShiftAmountTy()));
TLI.getShiftAmountTy(LVT)));
Mask = DAG.getNode(ISD::SUB, dl, LVT, Mask, DAG.getConstant(1, LVT));
LHS = DAG.getNode(ISD::AND, dl, LVT, LHS, Mask);

View File

@ -1420,7 +1420,7 @@ SDValue SelectionDAG::getMDNode(const MDNode *MD) {
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
EVT OpTy = Op.getValueType();
MVT ShTy = TLI.getShiftAmountTy();
MVT ShTy = TLI.getShiftAmountTy(OpTy);
if (OpTy == ShTy || OpTy.isVector()) return Op;
ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
@ -2048,7 +2048,7 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
return;
}
break;
default:
// Allow the target to implement this method for its nodes.
if (Op.getOpcode() >= ISD::BUILTIN_OP_END) {
@ -2292,12 +2292,12 @@ bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
!isa<ConstantSDNode>(Op.getOperand(1)))
return false;
if (Op.getOpcode() == ISD::OR &&
!MaskedValueIsZero(Op.getOperand(0),
cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
return false;
return true;
}
@ -2748,7 +2748,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// i8, which is easy to fall into in generic code that uses
// TLI.getShiftAmount().
assert(N2.getValueType().getSizeInBits() >=
Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
"Invalid use of small shift amount with oversized value!");
// Always fold shifts of i1 values so the code generator doesn't need to

View File

@ -909,7 +909,7 @@ void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
Val.getResNo(), Offset, dl, DbgSDNodeOrder);
DAG.AddDbgValue(SDV, Val.getNode(), false);
}
} else
DEBUG(dbgs() << "Dropping debug info for " << DI);
DanglingDebugInfoMap[V] = DanglingDebugInfo();
}
@ -1418,7 +1418,7 @@ void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// jle foo
//
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
if (!TLI.isJumpExpensive() &&
BOp->hasOneUse() &&
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
@ -2409,19 +2409,19 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
MVT ShiftTy = TLI.getShiftAmountTy();
MVT ShiftTy = TLI.getShiftAmountTy(Op2.getValueType());
// Coerce the shift amount to the right type if we can.
if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
unsigned ShiftSize = ShiftTy.getSizeInBits();
unsigned Op2Size = Op2.getValueType().getSizeInBits();
DebugLoc DL = getCurDebugLoc();
// If the operand is smaller than the shift count type, promote it.
if (ShiftSize > Op2Size)
Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
// If the operand is larger than the shift count type but the shift
// count type has enough bits to represent any shift value, truncate
// it now. This is a common case and it exposes the truncate to

View File

@ -563,7 +563,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
setOperationAction(ISD::TRAP, MVT::Other, Expand);
IsLittleEndian = TD->isLittleEndian();
ShiftAmountTy = PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
PointerTy = MVT::getIntegerVT(8*TD->getPointerSize());
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
@ -596,6 +596,10 @@ TargetLowering::~TargetLowering() {
delete &TLOF;
}
MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const {
return MVT::getIntegerVT(8*TD->getPointerSize());
}
/// canOpTrap - Returns true if the operation can trap for the value type.
/// VT must be a legal type.
bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const {
@ -1401,7 +1405,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
BitWidth - InnerVT.getSizeInBits()) &
DemandedMask) == 0 &&
isTypeDesirableForOp(ISD::SHL, InnerVT)) {
EVT ShTy = getShiftAmountTy();
EVT ShTy = getShiftAmountTy(InnerVT);
if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
ShTy = InnerVT;
SDValue NarrowShl =
@ -2188,7 +2192,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
if (ConstantSDNode *AndRHS =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
EVT ShiftTy = DCI.isBeforeLegalize() ?
getPointerTy() : getShiftAmountTy();
getPointerTy() : getShiftAmountTy(N0.getValueType());
if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
// Perform the xform if the AND RHS is a single bit.
if (AndRHS->getAPIntValue().isPowerOf2()) {
@ -2359,7 +2363,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// (Z-X) == X --> Z == X<<1
SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(),
N1,
DAG.getConstant(1, getShiftAmountTy()));
DAG.getConstant(1, getShiftAmountTy(N1.getValueType())));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond);
@ -2381,7 +2385,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
// X == (Z-X) --> X<<1 == Z
SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0,
DAG.getConstant(1, getShiftAmountTy()));
DAG.getConstant(1, getShiftAmountTy(N0.getValueType())));
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(SH.getNode());
return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond);
@ -2493,7 +2497,7 @@ bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue *&GA,
}
}
}
return false;
}
@ -3141,14 +3145,14 @@ SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
// Shift right algebraic if shift value is nonzero
if (magics.s > 0) {
Q = DAG.getNode(ISD::SRA, dl, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType())));
if (Created)
Created->push_back(Q.getNode());
}
// Extract the sign bit and add it to the quotient
SDValue T =
DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
getShiftAmountTy()));
getShiftAmountTy(Q.getValueType())));
if (Created)
Created->push_back(T.getNode());
return DAG.getNode(ISD::ADD, dl, VT, Q, T);
@ -3192,19 +3196,19 @@ SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
assert(magics.s < N1C->getAPIntValue().getBitWidth() &&
"We shouldn't generate an undefined shift!");
return DAG.getNode(ISD::SRL, dl, VT, Q,
DAG.getConstant(magics.s, getShiftAmountTy()));
DAG.getConstant(magics.s, getShiftAmountTy(Q.getValueType())));
} else {
SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
if (Created)
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
DAG.getConstant(1, getShiftAmountTy()));
DAG.getConstant(1, getShiftAmountTy(NPQ.getValueType())));
if (Created)
Created->push_back(NPQ.getNode());
NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
if (Created)
Created->push_back(NPQ.getNode());
return DAG.getNode(ISD::SRL, dl, VT, NPQ,
DAG.getConstant(magics.s-1, getShiftAmountTy()));
DAG.getConstant(magics.s-1, getShiftAmountTy(NPQ.getValueType())));
}
}

View File

@ -48,7 +48,6 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the TargetLowering object.
//I am having problems with shr n i8 1
setShiftAmountType(MVT::i64);
setBooleanContents(ZeroOrOneBooleanContent);
addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);

View File

@ -31,25 +31,25 @@ namespace llvm {
/// GPRelHi/GPRelLo - These represent the high and low 16-bit
/// parts of a global address respectively.
GPRelHi, GPRelLo,
/// RetLit - Literal Relocation of a Global
RelLit,
/// GlobalRetAddr - used to restore the return address
GlobalRetAddr,
/// CALL - Normal call.
CALL,
/// DIVCALL - used for special library calls for div and rem
DivCall,
/// return flag operand
RET_FLAG,
/// CHAIN = COND_BRANCH CHAIN, OPC, (G|F)PRC, DESTBB [, INFLAG] - This
/// corresponds to the COND_BRANCH pseudo instruction.
/// *PRC is the input register to compare to zero,
/// OPC is the branch opcode to use (e.g. Alpha::BEQ),
/// DESTBB is the destination block to branch to, and INFLAG is
@ -62,7 +62,9 @@ namespace llvm {
class AlphaTargetLowering : public TargetLowering {
public:
explicit AlphaTargetLowering(TargetMachine &TM);
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
/// getSetCCResultType - Get the SETCC result ValueType
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
@ -92,7 +94,7 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;

View File

@ -41,7 +41,6 @@ using namespace llvm;
BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
setShiftAmountType(MVT::i16);
setBooleanContents(ZeroOrOneBooleanContent);
setStackPointerRegisterToSaveRestore(BF::SP);
setIntDivIsCheap(false);

View File

@ -32,6 +32,7 @@ namespace llvm {
class BlackfinTargetLowering : public TargetLowering {
public:
BlackfinTargetLowering(TargetMachine &TM);
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i16; }
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
virtual void ReplaceNodeResults(SDNode *N,

View File

@ -435,7 +435,6 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
setShiftAmountType(MVT::i32);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setStackPointerRegisterToSaveRestore(SPU::R1);
@ -2190,7 +2189,7 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
{
SDValue N0 = Op.getOperand(0); // Everything has at least one operand
DebugLoc dl = Op.getDebugLoc();
EVT ShiftVT = TLI.getShiftAmountTy();
EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
assert(Op.getValueType() == MVT::i8);
switch (Opc) {
@ -3112,7 +3111,7 @@ SPUTargetLowering::getSingleConstraintMatchWeight(
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break;
//FIXME: Seems like the supported constraint letters were just copied
// from PPC, as the following doesn't correspond to the GCC docs.
// I'm leaving it so until someone adds the corresponding lowering support.

View File

@ -109,6 +109,8 @@ namespace llvm {
/// getSetCCResultType - Return the ValueType for ISD::SETCC
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
//! Custom lowering hooks
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@ -179,9 +181,9 @@ namespace llvm {
virtual bool isLegalICmpImmediate(int64_t Imm) const;
virtual bool isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const;
/// After allocating this many registers, the allocator should feel
/// register pressure. The value is a somewhat random guess, based on the
/// number of non callee saved registers in the C calling convention.

View File

@ -77,10 +77,6 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
// Division is expensive
setIntDivIsCheap(false);
// Even if we have only 1 bit shift here, we can perform
// shifts of the whole bitwidth 1 bit per step.
setShiftAmountType(MVT::i8);
setStackPointerRegisterToSaveRestore(MSP430::SPW);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::Latency);
@ -330,7 +326,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
switch (RegVT.getSimpleVT().SimpleTy) {
default:
{
#ifndef NDEBUG
errs() << "LowerFormalArguments Unhandled argument type: "

View File

@ -73,6 +73,8 @@ namespace llvm {
public:
explicit MSP430TargetLowering(MSP430TargetMachine &TM);
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

View File

@ -362,7 +362,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
}
setShiftAmountType(MVT::i32);
setBooleanContents(ZeroOrOneBooleanContent);
if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {

View File

@ -29,36 +29,36 @@ namespace llvm {
/// FSEL - Traditional three-operand fsel node.
///
FSEL,
/// FCFID - The FCFID instruction, taking an f64 operand and producing
/// and f64 value containing the FP representation of the integer that
/// was temporarily in the f64 operand.
FCFID,
/// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
/// operand, producing an f64 value containing the integer representation
/// of that FP value.
FCTIDZ, FCTIWZ,
/// STFIWX - The STFIWX instruction. The first operand is an input token
/// chain, then an f64 value to store, then an address to store it to.
STFIWX,
// VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
// three v4f32 operands and producing a v4f32 result.
VMADDFP, VNMSUBFP,
/// VPERM - The PPC VPERM Instruction.
///
VPERM,
/// Hi/Lo - These represent the high and low 16-bit parts of a global
/// address respectively. These nodes have two operands, the first of
/// which must be a TargetGlobalAddress, and the second of which must be a
/// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
/// though these are usually folded into other nodes.
Hi, Lo,
TOC_ENTRY,
/// The following three target-specific nodes are used for calls through
@ -80,37 +80,37 @@ namespace llvm {
/// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
/// compute an allocation on the stack.
DYNALLOC,
/// GlobalBaseReg - On Darwin, this node represents the result of the mflr
/// at function entry, used for PIC code.
GlobalBaseReg,
/// These nodes represent the 32-bit PPC shifts that operate on 6-bit
/// shift amounts. These nodes are generated by the multi-precision shift
/// code.
SRL, SRA, SHL,
/// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
/// registers.
EXTSW_32,
/// CALL - A direct function call.
CALL_Darwin, CALL_SVR4,
/// NOP - Special NOP which follows 64-bit SVR4 calls.
NOP,
/// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
/// MTCTR instruction.
MTCTR,
/// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
/// BCTRL instruction.
BCTRL_Darwin, BCTRL_SVR4,
/// Return with a flag operand, matched by 'blr'
RET_FLAG,
/// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
/// instructions. This copies the bits corresponding to the specified
/// CRREG into the resultant GPR. Bits corresponding to other CR regs
@ -122,20 +122,20 @@ namespace llvm {
/// encoding for the OPC field to identify the compare. For example, 838
/// is VCMPGTSH.
VCMP,
/// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
/// altivec VCMP*o instructions. For lack of better number, we use the
/// opcode number encoding for the OPC field to identify the compare. For
/// example, 838 is VCMPGTSH.
VCMPo,
/// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
/// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
/// condition register to branch on, OPC is the branch opcode to use (e.g.
/// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
/// an optional input flag argument.
COND_BRANCH,
// The following 5 instructions are used only as part of the
// long double-to-int conversion sequence.
@ -150,7 +150,7 @@ namespace llvm {
MTFSB1,
/// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with
/// rounding towards zero. It has flags added so it won't move past the
/// FPSCR-setting instructions.
FADDRTZ,
@ -174,14 +174,14 @@ namespace llvm {
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,
/// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
/// i32.
STBRX,
/// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
/// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
/// then puts it in the bottom bits of the GPRC. TYPE can be either i16
/// or i32.
@ -194,7 +194,7 @@ namespace llvm {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
@ -208,16 +208,16 @@ namespace llvm {
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary);
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, bool isUnary);
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool isAllNegativeZeroVector(SDNode *N);
@ -225,24 +225,26 @@ namespace llvm {
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
}
class PPCTargetLowering : public TargetLowering {
const PPCSubtarget &PPCSubTarget;
public:
explicit PPCTargetLowering(PPCTargetMachine &TM);
/// getTargetNodeName() - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
@ -253,19 +255,19 @@ namespace llvm {
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const;
/// SelectAddressRegReg - Given the specified addressed, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
SelectionDAG &DAG) const;
/// SelectAddressRegImm - Returns true if the address N can be represented
/// by a base register plus a signed 16-bit displacement [r+imm], and if it
/// is not better represented as reg+reg.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
SelectionDAG &DAG) const;
/// SelectAddressRegRegOnly - Given the specified addressed, force it to be
/// represented as an indexed [r+r] operation.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
@ -277,7 +279,7 @@ namespace llvm {
bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
SelectionDAG &DAG) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@ -289,10 +291,10 @@ namespace llvm {
SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@ -300,13 +302,13 @@ namespace llvm {
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB, bool is64Bit,
unsigned BinOpcode) const;
MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB,
bool is8bit, unsigned Opcode) const;
ConstraintType getConstraintType(const std::string &Constraint) const;
/// Examine constraint string and operand type and determine a weight value.
@ -314,7 +316,7 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@ -329,11 +331,11 @@ namespace llvm {
char ConstraintLetter,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
@ -344,7 +346,7 @@ namespace llvm {
virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero that means it's safe to destination

View File

@ -59,9 +59,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
// Compute derived properties from the register classes
computeRegisterProperties();
// Set shifts properties
setShiftAmountType(MVT::i64);
// Provide all sorts of operation actions
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);

View File

@ -57,6 +57,8 @@ namespace llvm {
public:
explicit SystemZTargetLowering(SystemZTargetMachine &TM);
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

View File

@ -220,7 +220,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setShiftAmountType(MVT::i8);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
@ -4181,7 +4180,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp,
DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
DAG.getConstant(NumBits,
TLI.getShiftAmountTy(SrcOp.getValueType()))));
}
SDValue
@ -4330,15 +4330,15 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For AVX-length vectors, build the individual 128-bit pieces and
// use shuffles to put them in place.
if (VT.getSizeInBits() > 256 &&
Subtarget->hasAVX() &&
!ISD::isBuildVectorAllZeros(Op.getNode())) {
SmallVector<SDValue, 8> V;
V.resize(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
V[i] = Op.getOperand(i);
}
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build the lower subvector.
@ -5046,7 +5046,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
DAG.getIntPtrConstant(Elt1 / 2));
if ((Elt1 & 1) == 0)
InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
DAG.getConstant(8, TLI.getShiftAmountTy()));
DAG.getConstant(8,
TLI.getShiftAmountTy(InsElt.getValueType())));
else if (Elt0 >= 0)
InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
DAG.getConstant(0xFF00, MVT::i16));
@ -5060,7 +5061,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
if ((Elt0 & 1) != 0)
InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
DAG.getConstant(8, TLI.getShiftAmountTy()));
DAG.getConstant(8,
TLI.getShiftAmountTy(InsElt0.getValueType())));
else if (Elt1 >= 0)
InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
DAG.getConstant(0x00FF, MVT::i16));
@ -5477,7 +5479,7 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
// Both of them can't be memory operations though.
if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
CanFoldLoad = false;
if (CanFoldLoad) {
if (HasSSE2 && NumElems == 2)
return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
@ -6090,7 +6092,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
SDValue ScaledN2 = N2;
if (Upper)
ScaledN2 = DAG.getNode(ISD::SUB, dl, N2.getValueType(), N2,
DAG.getConstant(NumElems /
(VT.getSizeInBits() / 128),
N2.getValueType()));
Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubN0.getValueType(), SubN0,

View File

@ -159,16 +159,16 @@ namespace llvm {
/// PSHUFB - Shuffle 16 8-bit values within a vector.
PSHUFB,
/// PANDN - and with not'd value.
PANDN,
/// PSIGNB/W/D - Copy integer sign.
PSIGNB, PSIGNW, PSIGND,
/// PBLENDVB - Variable blend
PBLENDVB,
/// FMAX, FMIN - Floating point max and min.
///
FMAX, FMIN,
@ -212,7 +212,7 @@ namespace llvm {
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
@ -467,6 +467,8 @@ namespace llvm {
virtual unsigned getJumpTableEncoding() const;
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,

View File

@ -42,9 +42,9 @@
using namespace llvm;
const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
getTargetNodeName(unsigned Opcode) const
{
switch (Opcode)
switch (Opcode)
{
case XCoreISD::BL : return "XCoreISD::BL";
case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
@ -77,7 +77,6 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Division is expensive
setIntDivIsCheap(false);
setShiftAmountType(MVT::i32);
setStackPointerRegisterToSaveRestore(XCore::SP);
setSchedulingPreference(Sched::RegPressure);
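(Illustrative note: the setShiftAmountType(MVT::i32) call removed above is superseded by the per-LHS-type hook this commit introduces; XCoreISelLowering.h, later in this diff, defines it inline. A minimal out-of-line sketch of the same override, shown here only for clarity:)

// XCore answers the query with a 32-bit shift amount for any LHS type.
MVT XCoreTargetLowering::getShiftAmountTy(EVT LHSTy) const {
  return MVT::i32;
}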
@ -95,7 +94,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Stop the combiner recombining select and set_cc
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
// 64bit
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SUB, MVT::i64, Custom);
@ -106,14 +105,14 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
// Bit Manipulation
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
// Jump tables.
setOperationAction(ISD::BR_JT, MVT::Other, Custom);
@ -122,7 +121,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Thread Local Storage
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
// Conversion of i64 -> double produces constantpool nodes
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
@ -143,7 +142,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
// Dynamic stack
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
@ -163,7 +162,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode())
switch (Op.getOpcode())
{
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
@ -414,7 +413,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
DebugLoc DL = Op.getDebugLoc();
SDValue Base;
int64_t Offset;
if (!LD->isVolatile() &&
@ -437,10 +436,10 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);
SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
LowAddr, MachinePointerInfo(), false, false, 0);
SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
@ -453,7 +452,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, DL);
}
if (LD->getAlignment() == 2) {
SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
BasePtr, LD->getPointerInfo(), MVT::i16,
@ -473,16 +472,16 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, DL);
}
// Lower to a call to __misaligned_load(BasePtr).
const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = IntPtrTy;
Entry.Node = BasePtr;
Args.push_back(Entry);
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, IntPtrTy, false, false,
false, false, 0, CallingConv::C, false,
@ -515,7 +514,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
SDValue BasePtr = ST->getBasePtr();
SDValue Value = ST->getValue();
DebugLoc dl = Op.getDebugLoc();
if (ST->getAlignment() == 2) {
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
@ -532,19 +531,19 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
ST->isNonTemporal(), 2);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
}
// Lower to a call to __misaligned_store(BasePtr, Value).
const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Ty = IntPtrTy;
Entry.Node = BasePtr;
Args.push_back(Entry);
Entry.Node = Value;
Args.push_back(Entry);
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
false, false, 0, CallingConv::C, false,
@ -722,7 +721,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
}
DebugLoc dl = N->getDebugLoc();
// Extract components
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(0), DAG.getConstant(0, MVT::i32));
@ -732,7 +731,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
N->getOperand(1), DAG.getConstant(0, MVT::i32));
SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(1), DAG.getConstant(1, MVT::i32));
// Expand
unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
XCoreISD::LSUB;
@ -740,7 +739,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSL, RHSL, Zero);
SDValue Lo(Carry.getNode(), 1);
SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSH, RHSH, Carry);
SDValue Hi(Ignored.getNode(), 1);
@ -761,8 +760,8 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
Node->getOperand(1), MachinePointerInfo(V),
false, false, 0);
// Increment the pointer, VAList, to the next vararg
SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
DAG.getConstant(VT.getSizeInBits(),
SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
DAG.getConstant(VT.getSizeInBits(),
getPointerTy()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
@ -781,20 +780,20 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
MachineFunction &MF = DAG.getMachineFunction();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
MachinePointerInfo(), false, false, 0);
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
// Depths > 0 not supported yet!
// Depths > 0 not supported yet!
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
RegInfo->getFrameRegister(MF), MVT::i32);
}
@ -919,7 +918,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
getPointerTy(), true));
SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
@ -944,8 +943,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
}
// Arguments that can be passed in a register must be kept in the
// Arguments that can be passed in a register must be kept in the
// RegsToPass vector
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
@ -954,7 +953,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
int Offset = VA.getLocMemOffset();
MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
Chain, Arg,
DAG.getConstant(Offset/4, MVT::i32)));
}
@ -963,16 +962,16 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because
// all store nodes are independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token
// Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
// The InFlag is necessary since all emitted instructions must be
// stuck together.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
@ -986,7 +985,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
// XCoreBranchLink = #chain, #target_address, #opt_in_flags...
// = Chain, Callee, Reg#1, Reg#2, ...
// = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
@ -994,7 +993,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Ops.push_back(Chain);
Ops.push_back(Callee);
// Add argument registers to the end of the list so that they are
// Add argument registers to the end of the list so that they are
// known live into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
@ -1098,11 +1097,11 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
unsigned LRSaveSize = StackSlotSize;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (VA.isRegLoc()) {
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
@ -1139,12 +1138,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
if (isVarArg) {
/* Argument registers */
static const unsigned ArgRegs[] = {
@ -1186,7 +1185,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
true));
}
}
return Chain;
}
@ -1222,7 +1221,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
// Analyze return values.
CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
// If this is the first return lowered for this function, add
// If this is the first return lowered for this function, add
// the regs to the liveout set for the function.
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
@ -1237,7 +1236,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag);
// guarantee that all emitted copies are
@ -1265,7 +1264,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
DebugLoc dl = MI->getDebugLoc();
assert((MI->getOpcode() == XCore::SELECT_CC) &&
"Unexpected instr type to insert");
// To "insert" a SELECT_CC instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
// to set, the condition code register to branch on, the true/false values to
@ -1273,7 +1272,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = BB;
++It;
// thisMBB:
// ...
// TrueVal = ...
@ -1296,7 +1295,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
.addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
@ -1304,10 +1303,10 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %FalseValue = ...
// # fallthrough to sinkMBB
BB = copy0MBB;
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
@ -1316,7 +1315,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
TII.get(XCore::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@ -1354,7 +1353,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
// low bit set
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@ -1377,7 +1376,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
EVT VT = N0.getValueType();
// fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@ -1393,7 +1392,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
// low bit set
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@ -1557,7 +1556,7 @@ static inline bool isImmUs4(int64_t val)
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
@ -1568,7 +1567,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
AM.BaseOffs%4 == 0;
}
switch (Size) {
case 1:
// reg + imm
@ -1593,7 +1592,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
// reg + reg<<2
return AM.Scale == 4 && AM.BaseOffs == 0;
}
return false;
}
@ -1603,7 +1602,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
std::vector<unsigned> XCoreTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const
EVT VT) const
{
if (Constraint.size() != 1)
return std::vector<unsigned>();
@ -1611,9 +1610,9 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
switch (Constraint[0]) {
default : break;
case 'r':
return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2,
XCore::R3, XCore::R4, XCore::R5,
XCore::R6, XCore::R7, XCore::R8,
return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2,
XCore::R3, XCore::R4, XCore::R5,
XCore::R6, XCore::R7, XCore::R8,
XCore::R9, XCore::R10, XCore::R11, 0);
break;
}

View File

@ -20,11 +20,11 @@
#include "XCore.h"
namespace llvm {
// Forward declarations
class XCoreSubtarget;
class XCoreTargetMachine;
namespace XCoreISD {
enum NodeType {
// Start the numbering where the builtin ops and target ops leave off.
@ -38,16 +38,16 @@ namespace llvm {
// dp relative address
DPRelativeWrapper,
// cp relative address
CPRelativeWrapper,
// Store word to stack
STWSP,
// Corresponds to retsp instruction
RETSP,
// Corresponds to LADD instruction
LADD,
@ -74,13 +74,14 @@ namespace llvm {
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class XCoreTargetLowering : public TargetLowering
class XCoreTargetLowering : public TargetLowering
{
public:
explicit XCoreTargetLowering(XCoreTargetMachine &TM);
virtual unsigned getJumpTableEncoding() const;
virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@ -91,10 +92,10 @@ namespace llvm {
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const;
/// getTargetNodeName - This method returns the name of a target specific
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
@ -108,7 +109,7 @@ namespace llvm {
private:
const XCoreTargetMachine &TM;
const XCoreSubtarget &Subtarget;
// Lower Operand helpers
SDValue LowerCCCArguments(SDValue Chain,
CallingConv::ID CallConv,
@ -148,12 +149,12 @@ namespace llvm {
SDValue LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
// Inline asm support
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
// Expand specifics
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG) const;