
progress making the world safe for ConstantDataVector. While we're at it,
allow PatternMatch's "neg" pattern to match integer vector negations, and
enhance ComputeNumSignBits to handle shl of vectors.

llvm-svn: 149082
Chris Lattner 2012-01-26 21:37:55 +00:00
parent bed3b11d94
commit ba1715c058
4 changed files with 93 additions and 64 deletions
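The first file below teaches PatternMatch's splat-aware matchers (apint_match and friends) to look through ConstantDataVector as well as ConstantVector splats. For illustration only (my sketch, not code from this commit; the include path and helper name are assumptions), the user-visible effect is that a matcher such as m_APInt binds the splatted value of a constant vector just as it binds a scalar ConstantInt:

#include "llvm/Support/PatternMatch.h"  // moved to llvm/IR/PatternMatch.h in later trees
#include <cstdint>
using namespace llvm;
using namespace llvm::PatternMatch;

// Hypothetical helper: extract the shift amount of "shl X, C", whether C is a
// scalar ConstantInt or a splatted vector constant such as <i16 3, i16 3, ...>.
static bool getSplatShiftAmount(Value *V, uint64_t &Amt) {
  Value *X;
  const APInt *C;
  if (match(V, m_Shl(m_Value(X), m_APInt(C)))) {
    Amt = C->getZExtValue();
    return true;
  }
  return false;
}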

@@ -98,12 +98,19 @@ struct apint_match {
Res = &CI->getValue();
return true;
}
// FIXME: Remove this.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
Res = &CI->getValue();
return true;
}
if (ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
if (ConstantInt *CI =
dyn_cast_or_null<ConstantInt>(CV->getSplatValue())) {
Res = &CI->getValue();
return true;
}
return false;
}
};
@@ -144,9 +151,13 @@ struct cst_pred_ty : public Predicate {
bool match(ITy *V) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
return this->isValue(CI->getValue());
// FIXME: Remove this.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
return this->isValue(CI->getValue());
if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
return this->isValue(CI->getValue());
return false;
}
};
@@ -164,12 +175,22 @@ struct api_pred_ty : public Predicate {
Res = &CI->getValue();
return true;
}
// FIXME: remove.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
if (this->isValue(CI->getValue())) {
Res = &CI->getValue();
return true;
}
if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(V))
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue()))
if (this->isValue(CI->getValue())) {
Res = &CI->getValue();
return true;
}
return false;
}
};
@@ -611,11 +632,11 @@ struct not_match {
}
private:
bool matchIfNot(Value *LHS, Value *RHS) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
return CI->isAllOnesValue() && L.match(LHS);
if (ConstantVector *CV = dyn_cast<ConstantVector>(RHS))
return CV->isAllOnesValue() && L.match(LHS);
return false;
return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
// FIXME: Remove CV.
isa<ConstantVector>(RHS)) &&
cast<Constant>(RHS)->isAllOnesValue() &&
L.match(LHS);
}
};
@@ -638,9 +659,9 @@ struct neg_match {
}
private:
bool matchIfNeg(Value *LHS, Value *RHS) {
if (ConstantInt *C = dyn_cast<ConstantInt>(LHS))
return C->isZero() && L.match(RHS);
return false;
return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
isa<ConstantAggregateZero>(LHS)) &&
L.match(RHS);
}
};
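For illustration (again my sketch, not part of the patch): with matchIfNeg above also accepting ConstantAggregateZero on the left-hand side, the same m_Neg pattern now fires for a vector negation written as a subtraction from zeroinitializer:

#include "llvm/Support/PatternMatch.h"  // llvm/IR/PatternMatch.h in later trees
using namespace llvm;
using namespace llvm::PatternMatch;

// Hypothetical helper: strip a negation, scalar or vector.
//   sub i32 0, %x                      --> returns %x
//   sub <4 x i32> zeroinitializer, %x  --> returns %x (newly matched)
static Value *skipNeg(Value *V) {
  Value *X;
  if (match(V, m_Neg(m_Value(X))))
    return X;
  return V;
}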

@@ -65,17 +65,17 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
// If this is a bitcast from constant vector -> vector, fold it.
ConstantVector *CV = dyn_cast<ConstantVector>(C);
if (CV == 0)
// FIXME: Remove ConstantVector support.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = CV->getNumOperands();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
Type *SrcEltTy = CV->getType()->getElementType();
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
@@ -95,7 +95,6 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
if (!C) return ConstantExpr::getBitCast(C, DestTy);
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
@@ -109,8 +108,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
CV = dyn_cast<ConstantVector>(C);
if (!CV) // If VMCore wasn't able to fold it, bail out.
// If VMCore wasn't able to fold it, bail out.
if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
!isa<ConstantDataVector>(C))
return C;
}
@@ -132,7 +132,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(SrcElt++));
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
@@ -149,28 +149,29 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
Result.push_back(Elt);
}
} else {
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
return ConstantVector::get(Result);
}
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(CV->getOperand(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
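
A worked example of the expansion loop above (mine, not from the patch): on a little-endian target, bitcasting <2 x i64> <i64 0, i64 1> to <4 x i32> visits each source element, shifts it right by 0 and then 32 bits, and truncates each piece, yielding <i32 0, i32 0, i32 1, i32 0>. The same arithmetic in plain C++:

#include <cstdint>
#include <cstdio>

// Rehearses FoldBitCast's little-endian source->dest expansion for
// NumSrcElt = 2, NumDstElt = 4, Ratio = 2, DstBitSize = 32.
int main() {
  const uint64_t Src[2] = {0, 1};               // <2 x i64> <i64 0, i64 1>
  uint32_t Dst[4];
  unsigned N = 0;
  for (unsigned i = 0; i != 2; ++i) {           // each source element
    unsigned ShiftAmt = 0;                      // little-endian starts at bit 0
    for (unsigned j = 0; j != 2; ++j) {         // Ratio pieces per element
      Dst[N++] = (uint32_t)(Src[i] >> ShiftAmt);  // the LShr + Trunc pair
      ShiftAmt += 32;
    }
  }
  for (unsigned k = 0; k != 4; ++k)
    std::printf("%u ", Dst[k]);                 // prints: 0 0 1 0
  return 0;
}
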
@@ -311,6 +312,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
// not reached.
}
// FIXME: Remove ConstantVector
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
isa<ConstantDataSequential>(C)) {
Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
@@ -1115,11 +1117,8 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
Type *Ty) {
assert(Op && "Called with NULL operand");
APFloat Val(Op->getValueAPF());
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
bool roundTowardZero, Type *Ty) {
// All of these conversion intrinsics form an integer of at most 64bits.
unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
assert(ResultWidth <= 64 &&
@@ -1271,24 +1270,31 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
}
if (ConstantVector *Op = dyn_cast<ConstantVector>(Operands[0])) {
// Support ConstantVector in case we have an Undef in the top.
if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/false, Ty);
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/false, Ty);
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp = dyn_cast<ConstantFP>(Op->getOperand(0)))
return ConstantFoldConvertToInt(FPOp, /*roundTowardZero=*/true, Ty);
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
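
To see why the ConstantVector case is kept above: an argument such as <2 x double> <double 4.0, double undef> is still represented as a ConstantVector (ConstantDataVector cannot hold undef lanes), yet its element 0 is a plain ConstantFP, which is all the cvtsd2si/cvtss2si family of intrinsics reads, so the fold still applies. (Illustrative value of mine, not a test case from this commit.)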

@@ -89,6 +89,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
// FIXME: Remove.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
KnownZero.setAllBits(); KnownOne.setAllBits();
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
@@ -1005,30 +1006,28 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
case Instruction::AShr:
case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
// ashr X, C -> adds C sign bits.
if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
Tmp += C->getZExtValue();
// ashr X, C -> adds C sign bits. Vectors too.
const APInt *ShAmt;
if (match(U->getOperand(1), m_APInt(ShAmt))) {
Tmp += ShAmt->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
// vector ashr X, <C, C, C, C> -> adds C sign bits
if (ConstantVector *C = dyn_cast<ConstantVector>(U->getOperand(1))) {
if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
Tmp += CI->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
}
return Tmp;
case Instruction::Shl:
if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
}
case Instruction::Shl: {
const APInt *ShAmt;
if (match(U->getOperand(1), m_APInt(ShAmt))) {
// shl destroys sign bits.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
if (C->getZExtValue() >= TyBits || // Bad shift.
C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
return Tmp - C->getZExtValue();
Tmp2 = ShAmt->getZExtValue();
if (Tmp2 >= TyBits || // Bad shift.
Tmp2 >= Tmp) break; // Shifted all sign bits out.
return Tmp - Tmp2;
}
break;
}
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: // NOT is handled here.
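
A small self-contained check (my own, not LLVM code) of the arithmetic the AShr and Shl cases rely on: shifting left by C destroys C sign bits, shifting right arithmetically by C adds C, and the patch now derives both counts for vector shifts with a splat amount via m_APInt:

#include <cassert>
#include <cstdint>

// Counts "sign bits" as ComputeNumSignBits defines them: the number of
// leading bits that are copies of the sign bit (always at least 1).
static unsigned numSignBits16(int16_t V) {
  unsigned N = 1;
  for (int Bit = 14; Bit >= 0; --Bit) {
    if (((V >> Bit) & 1) != ((V >> 15) & 1))
      break;
    ++N;
  }
  return N;
}

int main() {
  int16_t X = 0x00FF;                                 // 8 leading copies of the sign bit
  assert(numSignBits16(X) == 8);
  assert(numSignBits16((int16_t)(X << 3)) == 8 - 3);  // shl destroys sign bits
  assert(numSignBits16((int16_t)(X >> 3)) == 8 + 3);  // ashr adds sign bits
  return 0;
}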

@@ -655,9 +655,12 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
if (A->getType() == B->getType()) return false;
// For now, only support constants with the same size.
if (TD->getTypeStoreSize(A->getType()) != TD->getTypeStoreSize(B->getType()))
uint64_t StoreSize = TD->getTypeStoreSize(A->getType());
if (StoreSize != TD->getTypeStoreSize(B->getType()) ||
StoreSize > 128)
return false;
// If a floating-point value and an integer value have the same encoding,
// they can share a constant-pool entry.
if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
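
For illustration of the FP/integer sharing rule noted above (a sketch under the assumption that APFloat/APInt are available; not code from this commit): a floating-point constant and an integer constant can share a pool entry exactly when their bit encodings agree, e.g. double 1.0 and i64 0x3FF0000000000000:

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  llvm::APFloat F(1.0);                        // IEEE double 1.0
  llvm::APInt   I(64, 0x3FF0000000000000ULL);  // the same 64-bit pattern
  // Same encoding, same store size: CanShareConstantPoolEntry may fold the
  // two constant-pool entries into one.
  assert(F.bitcastToAPInt() == I);
  return 0;
}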