Reduce dyn_cast<> to isa<> or cast<> where possible.

No functional change intended.

llvm-svn: 234586

commit f6149322d4, parent 2bc558bb52
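For context, the three casting operators this commit trades between live in llvm/Support/Casting.h: `isa<T>(V)` is a pure type test, `cast<T>(V)` converts and asserts the type in asserts-enabled builds, and `dyn_cast<T>(V)` converts but returns null on mismatch. Paying for `dyn_cast<>`'s null path and then never inspecting it is exactly the pattern removed below. A minimal sketch of the intended division of labor (hypothetical helper functions, not part of this commit):

```cpp
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

unsigned opcodeOrZero(Value *V) {
  // isa<>: type test only -- no converted pointer is needed here.
  if (!isa<Instruction>(V))
    return 0;
  // The type is now established, so use the asserting cast<> rather than
  // dyn_cast<>; a failed cast<> trips an assertion instead of yielding null.
  return cast<Instruction>(V)->getOpcode();
}

unsigned opcodeOrZeroCompact(Value *V) {
  // When the null result is actually used, dyn_cast<> is the right tool:
  // one call performs both the test and the conversion.
  if (auto *I = dyn_cast<Instruction>(V))
    return I->getOpcode();
  return 0;
}
```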
@@ -694,10 +694,9 @@ static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
     // We're running this loop for once for each value queried resulting in a
     // runtime of ~O(#assumes * #values).
 
-    assert(isa<IntrinsicInst>(I) &&
-           dyn_cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::assume &&
+    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
            "must be an assume intrinsic");
 
     Value *Arg = I->getArgOperand(0);
 
     if (Arg == V && isValidAssumeForContext(I, Q)) {
@@ -1984,12 +1984,12 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
     N0IsConst = isConstantSplatVector(N0.getNode(), ConstValue0);
     N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
   } else {
-    N0IsConst = dyn_cast<ConstantSDNode>(N0) != nullptr;
-    ConstValue0 = N0IsConst ? (dyn_cast<ConstantSDNode>(N0))->getAPIntValue()
-                            : APInt();
-    N1IsConst = dyn_cast<ConstantSDNode>(N1) != nullptr;
-    ConstValue1 = N1IsConst ? (dyn_cast<ConstantSDNode>(N1))->getAPIntValue()
-                            : APInt();
+    N0IsConst = isa<ConstantSDNode>(N0);
+    if (N0IsConst)
+      ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
+    N1IsConst = isa<ConstantSDNode>(N1);
+    if (N1IsConst)
+      ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
   }
 
   // fold (mul c1, c2) -> c1*c2
@@ -11662,7 +11662,7 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
     // type.
     if (V->getOperand(0).getValueType() != NVT)
       return SDValue();
-    unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
+    unsigned Idx = N->getConstantOperandVal(1);
     unsigned NumElems = NVT.getVectorNumElements();
     assert((Idx % NumElems) == 0 &&
            "IDX in concat is not a multiple of the result vector length.");
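The hunk above also swaps the hand-written cast chain for SDNode's convenience accessor. As a sketch (for illustration only; see `SDNode::getConstantOperandVal` in llvm/CodeGen/SelectionDAGNodes.h for the real definition), the call expands to roughly:

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Roughly what N->getConstantOperandVal(Num) does: an asserting cast<>, so a
// non-constant operand trips an assertion instead of dereferencing the null
// pointer that a failed dyn_cast<> would have returned.
static uint64_t constantOperandVal(const SDNode *N, unsigned Num) {
  return cast<ConstantSDNode>(N->getOperand(Num))->getZExtValue();
}
```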
@@ -626,7 +626,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
   // Add a leading constant argument with the Flags and the calling convention
   // masked together
   CallingConv::ID CallConv = CS.getCallingConv();
-  int Flags = dyn_cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
+  int Flags = cast<ConstantInt>(CS.getArgument(2))->getZExtValue();
   assert(Flags == 0 && "not expected to be used");
   Ops.push_back(DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
   Ops.push_back(
@@ -316,7 +316,7 @@ void Interpreter::visitICmpInst(ICmpInst &I) {
 
 #define IMPLEMENT_VECTOR_FCMP(OP)                                  \
   case Type::VectorTyID:                                           \
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {  \
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {     \
       IMPLEMENT_VECTOR_FCMP_T(OP, Float);                          \
     } else {                                                       \
       IMPLEMENT_VECTOR_FCMP_T(OP, Double);                         \
@@ -363,7 +363,7 @@ static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
 
 #define MASK_VECTOR_NANS(TY, X,Y, FLAG)                            \
   if (TY->isVectorTy()) {                                          \
-    if (dyn_cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
+    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) {     \
       MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                        \
     } else {                                                       \
       MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                       \
@@ -536,7 +536,7 @@ static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
   if(Ty->isVectorTy()) {
     assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
     Dest.AggregateVal.resize( Src1.AggregateVal.size() );
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
       for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
         Dest.AggregateVal[_i].IntVal = APInt(1,
         ( (Src1.AggregateVal[_i].FloatVal ==
@@ -567,7 +567,7 @@ static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
   if(Ty->isVectorTy()) {
     assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
     Dest.AggregateVal.resize( Src1.AggregateVal.size() );
-    if(dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
       for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
         Dest.AggregateVal[_i].IntVal = APInt(1,
         ( (Src1.AggregateVal[_i].FloatVal !=
@@ -713,10 +713,10 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
   // Macros to choose appropriate TY: float or double and run operation
   // execution
 #define FLOAT_VECTOR_OP(OP) {                                        \
-  if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())       \
+  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())           \
     FLOAT_VECTOR_FUNCTION(OP, FloatVal)                              \
   else {                                                             \
-    if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())    \
+    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())        \
       FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                           \
     else {                                                           \
       dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
@@ -745,12 +745,12 @@ void Interpreter::visitBinaryOperator(BinaryOperator &I) {
     case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
     case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
     case Instruction::FRem:
-      if (dyn_cast<VectorType>(Ty)->getElementType()->isFloatTy())
+      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
         for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
           R.AggregateVal[i].FloatVal =
           fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
       else {
-        if (dyn_cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
            R.AggregateVal[i].DoubleVal =
            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
@@ -2040,7 +2040,7 @@ RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const {
 /// to CurRec's name.
 Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass,
                         Init *Name, const std::string &Scoper) {
-  RecTy *Type = dyn_cast<TypedInit>(Name)->getType();
+  RecTy *Type = cast<TypedInit>(Name)->getType();
 
   BinOpInit *NewName =
     BinOpInit::get(BinOpInit::STRCONCAT,
@@ -848,7 +848,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
   //     MOV  X0, WideImmediate
   //     LDR  X2, [BaseReg, X0]
   if (isa<ConstantSDNode>(RHS)) {
-    int64_t ImmOff = (int64_t)dyn_cast<ConstantSDNode>(RHS)->getZExtValue();
+    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
     unsigned Scale = Log2_32(Size);
     // Skip the immediate can be seleced by load/store addressing mode.
     // Also skip the immediate can be encoded by a single ADD (SUB is also
@@ -132,7 +132,7 @@ SDNode *BPFDAGToDAGISel::Select(SDNode *Node) {
   }
 
   case ISD::FrameIndex: {
-    int FI = dyn_cast<FrameIndexSDNode>(Node)->getIndex();
+    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
     EVT VT = Node->getValueType(0);
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
     unsigned Opc = BPF::MOV_rr;
@@ -2106,7 +2106,7 @@ HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
     // is Big Endian.
     unsigned OpIdx = NElts - i - 1;
     SDValue Operand = BVN->getOperand(OpIdx);
-    if (dyn_cast<ConstantSDNode>(Operand))
+    if (isa<ConstantSDNode>(Operand))
       // This operand is already in ConstVal.
       continue;
 
@@ -440,7 +440,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
 
 bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
   const GlobalValue *GV = dyn_cast<GlobalValue>(V);
-  if (GV && isa<Function>(GV) && dyn_cast<Function>(GV)->isIntrinsic())
+  if (GV && isa<Function>(GV) && cast<Function>(GV)->isIntrinsic())
     return false;
   if (!GV)
     return false;
@@ -1765,12 +1765,11 @@ void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
   case Type::IntegerTyID: {
     const Type *ETy = CPV->getType();
     if (ETy == Type::getInt8Ty(CPV->getContext())) {
-      unsigned char c =
-          (unsigned char)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
       ptr = &c;
       aggBuffer->addBytes(ptr, 1, Bytes);
     } else if (ETy == Type::getInt16Ty(CPV->getContext())) {
-      short int16 = (short)(dyn_cast<ConstantInt>(CPV))->getZExtValue();
+      short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
       ptr = (unsigned char *)&int16;
       aggBuffer->addBytes(ptr, 2, Bytes);
     } else if (ETy == Type::getInt32Ty(CPV->getContext())) {
@@ -3893,7 +3893,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
     const SDNode *left = N0.getOperand(0).getNode();
     const SDNode *right = N0.getOperand(1).getNode();
 
-    if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))
+    if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
       opIsLive = true;
 
     if (!opIsLive)
@@ -70,8 +70,8 @@ static void convertTransferToLoop(
 
   // srcAddr and dstAddr are expected to be pointer types,
   // so no check is made here.
-  unsigned srcAS = dyn_cast<PointerType>(srcAddr->getType())->getAddressSpace();
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned srcAS = cast<PointerType>(srcAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointers to (char *)
   srcAddr = builder.CreateBitCast(srcAddr, Type::getInt8PtrTy(Context, srcAS));
@@ -108,7 +108,7 @@ static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
   origBB->getTerminator()->setSuccessor(0, loopBB);
   IRBuilder<> builder(origBB, origBB->getTerminator());
 
-  unsigned dstAS = dyn_cast<PointerType>(dstAddr->getType())->getAddressSpace();
+  unsigned dstAS = cast<PointerType>(dstAddr->getType())->getAddressSpace();
 
   // Cast pointer to the type of value getting stored
   dstAddr =
@@ -345,7 +345,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
     unsigned NOps = N->getNumOperands();
     for (unsigned i = 0; i < NOps; i++) {
       // XXX: Why is this here?
-      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
+      if (isa<RegisterSDNode>(N->getOperand(i))) {
         IsRegSeq = false;
         break;
       }
@@ -358,7 +358,7 @@ def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
 
 def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                             (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
 }]>;
 
 
@@ -389,7 +389,7 @@ def flat_store : PatFrag<(ops node:$val, node:$ptr),
 
 def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
 }]>;
 
 class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
@@ -1811,7 +1811,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
 
   BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
@@ -1819,7 +1819,7 @@ SDValue Swz[4], SelectionDAG &DAG) const {
   SwizzleRemap.clear();
   BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
   for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = dyn_cast<ConstantSDNode>(Swz[i])->getZExtValue();
+    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
     if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
       Swz[i] = DAG.getConstant(SwizzleRemap[Idx], MVT::i32);
   }
@@ -162,7 +162,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(2);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
@@ -186,7 +186,7 @@ class R600TextureIntrinsicsReplacer :
     Value *SamplerId = I.getArgOperand(5);
 
     unsigned TextureType =
-        dyn_cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
+        cast<ConstantInt>(I.getArgOperand(6))->getZExtValue();
 
     unsigned SrcSelect[4] = { 0, 1, 2, 3 };
     unsigned CT[4] = {1, 1, 1, 1};
@@ -11970,7 +11970,7 @@ static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
   // Now we have only mask extension
   assert(InVT.getVectorElementType() == MVT::i1);
   SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -12046,7 +12046,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
   }
 
   SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
-  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
+  const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
   SDValue CP = DAG.getConstantPool(C, getPointerTy());
   unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
   SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
@@ -15287,10 +15287,8 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
   }
   case PREFETCH: {
     SDValue Hint = Op.getOperand(6);
-    unsigned HintVal;
-    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
-        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
-      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
+    unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
+    assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
     unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
     SDValue Chain = Op.getOperand(0);
     SDValue Mask = Op.getOperand(2);
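The PREFETCH hunk is the one place where the shape of the check changes rather than just the operator: a guarded `llvm_unreachable` becomes an asserting `cast<>` plus an `assert`, stating an invariant instead of validating input. A sketch of the two shapes (hypothetical function names, for illustration only; note that with asserts disabled `llvm_unreachable` is merely an optimizer hint, so the old run-time test bought little real safety):

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

using namespace llvm;

// Old shape: dyn_cast<> for the type check, a range check, and
// llvm_unreachable as the failure path.
static unsigned prefetchHintOld(SDValue Hint) {
  unsigned HintVal;
  if (dyn_cast<ConstantSDNode>(Hint) == nullptr ||
      (HintVal = dyn_cast<ConstantSDNode>(Hint)->getZExtValue()) > 1)
    llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
  return HintVal;
}

// New shape: cast<> checks the type and the assert checks the range, both
// only in asserts-enabled builds; release builds pay for neither.
static unsigned prefetchHintNew(SDValue Hint) {
  unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
  assert(HintVal < 2 && "Wrong prefetch hint in intrinsic: should be 0 or 1");
  return HintVal;
}
```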
@@ -24242,7 +24240,7 @@ TargetLowering::ConstraintWeight
     break;
   case 'G':
   case 'C':
-    if (dyn_cast<ConstantFP>(CallOperandVal)) {
+    if (isa<ConstantFP>(CallOperandVal)) {
       weight = CW_Constant;
     }
     break;
@@ -631,53 +631,53 @@ def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
 
 def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedLoadSDNode>(N))
-    return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
+  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
+    return Load->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_load node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedLoadSDNode>(N) != 0);
+  return isa<MaskedLoadSDNode>(N);
 }]>;
 
 def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 16;
   return false;
 }]>;
 
 def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 32;
   return false;
 }]>;
 
 def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  if (dyn_cast<MaskedStoreSDNode>(N))
-    return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
+  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
+    return Store->getAlignment() >= 64;
   return false;
 }]>;
 
 def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                          (masked_store node:$src1, node:$src2, node:$src3), [{
-  return (dyn_cast<MaskedStoreSDNode>(N) != 0);
+  return isa<MaskedStoreSDNode>(N);
 }]>;
 
 
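The masked load/store fragments above show the other idiom this commit promotes: when the null result of `dyn_cast<>` is genuinely needed, do the cast once and bind the result, rather than following a discarded `dyn_cast<>` with a second `cast<>` of the same node. The same shape in plain C++ (a hypothetical helper for illustration; the real checks live in the TableGen fragments above):

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

static bool isMaskedLoadAlignedAtLeast(const SDNode *N, unsigned MinAlign) {
  // One dyn_cast<> performs both the type test and the conversion; the
  // bound pointer is then reused instead of re-casting N.
  if (const auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= MinAlign;
  return false;
}
```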
@@ -1053,7 +1053,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   // path is rarely taken. This seems to be the case for SPEC benchmarks.
   TerminatorInst *CheckTerm = SplitBlockAndInsertIfThen(
       Cmp, InsertBefore, false, MDBuilder(*C).createBranchWeights(1, 100000));
-  assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
+  assert(cast<BranchInst>(CheckTerm)->isUnconditional());
   BasicBlock *NextBB = CheckTerm->getSuccessor(0);
   IRB.SetInsertPoint(CheckTerm);
   Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
@@ -308,7 +308,7 @@ void SanitizerCoverageModule::InjectCoverageForIndirectCalls(
     IRBuilder<> IRB(I);
     CallSite CS(I);
     Value *Callee = CS.getCalledValue();
-    if (dyn_cast<InlineAsm>(Callee)) continue;
+    if (isa<InlineAsm>(Callee)) continue;
     GlobalVariable *CalleeCache = new GlobalVariable(
         *F.getParent(), Ty, false, GlobalValue::PrivateLinkage,
         Constant::getNullValue(Ty), "__sancov_gen_callee_cache");
@@ -1183,7 +1183,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
     case Instruction::ICmp:
     case Instruction::FCmp: {
       // Check that all of the compares have the same predicate.
-      CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
       Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
       for (unsigned i = 1, e = VL.size(); i < e; ++i) {
         CmpInst *Cmp = cast<CmpInst>(VL[i]);
@@ -2202,7 +2202,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       if (Value *V = alreadyVectorized(E->Scalars))
         return V;
 
-      CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
+      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
       Value *V;
       if (Opcode == Instruction::FCmp)
         V = Builder.CreateFCmp(P0, L, R);
@@ -117,7 +117,7 @@ static void dumpNode( yaml::Node *n
       outs() << indent(Indent) << "}";
     } else if (yaml::AliasNode *an = dyn_cast<yaml::AliasNode>(n)){
       outs() << "*" << an->getName();
-    } else if (dyn_cast<yaml::NullNode>(n)) {
+    } else if (isa<yaml::NullNode>(n)) {
       outs() << prettyTag(n) << " null";
     }
   }