Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-24 11:42:57 +01:00
[InstCombine] use m_APInt to allow (X << C) >>u C --> X & (-1 >>u C) with splat vectors
llvm-svn: 293208
commit afb8de4915
parent b7932e32fe
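Before the diff itself, a note on the transform: the fold rewrites a left shift followed by a logical right shift by the same amount into a single mask of the low bits. A minimal standalone C++ illustration of that identity (plain integers, not InstCombine code; the 44-bit width and shift of 33 are borrowed from the test updated below):

#include <cassert>
#include <cstdint>

int main() {
  // Emulate an i44 value in a uint64_t; C is the common shift amount.
  const unsigned BitWidth = 44, C = 33;
  const uint64_t TypeMask = (uint64_t(1) << BitWidth) - 1;
  uint64_t X = 0x123456789ABull & TypeMask;

  // (X << C) >>u C keeps only the low (BitWidth - C) bits of X ...
  uint64_t RoundTripped = ((X << C) & TypeMask) >> C;
  // ... which is exactly X & (-1 >>u C); for i44 and C = 33 the mask is 2047.
  uint64_t Masked = X & (TypeMask >> C);

  assert(RoundTripped == Masked);
  return 0;
}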
@@ -353,29 +353,37 @@ foldShiftByConstOfShiftByConst(BinaryOperator &I, const APInt *COp1,
   // Combinations of right and left shifts will still be optimized in
   // DAGCombine where scalar evolution no longer applies.
 
+  Value *X = ShiftOp->getOperand(0);
+  unsigned ShiftAmt1 = ShAmt1->getLimitedValue();
+  unsigned ShiftAmt2 = COp1->getLimitedValue();
+  assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
+  if (ShiftAmt1 == 0)
+    return nullptr; // Will be simplified in the future.
+
+  if (ShiftAmt1 == ShiftAmt2) {
+    // FIXME: This repeats a fold that exists in foldShiftedShift(), but we're
+    // not handling the related fold here:
+    // (X >>u C) << C --> X & (-1 << C).
+    // foldShiftedShift() is always called before this, but it is restricted to
+    // only handle cases where the ShiftOp has one use. We don't have that
+    // restriction here.
+    if (I.getOpcode() != Instruction::LShr ||
+        ShiftOp->getOpcode() != Instruction::Shl)
+      return nullptr;
+
+    // (X << C) >>u C --> X & (-1 >>u C).
+    APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
+    return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
+  }
+
   // FIXME: Everything under here should be extended to work with vector types.
 
   auto *ShiftAmt1C = dyn_cast<ConstantInt>(ShiftOp->getOperand(1));
   if (!ShiftAmt1C)
     return nullptr;
 
-  uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
-  uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
-  assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
-  if (ShiftAmt1 == 0)
-    return nullptr; // Will be simplified in the future.
-
-  Value *X = ShiftOp->getOperand(0);
   IntegerType *Ty = cast<IntegerType>(I.getType());
-  if (ShiftAmt1 == ShiftAmt2) {
-    // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
-    if (I.getOpcode() == Instruction::LShr &&
-        ShiftOp->getOpcode() == Instruction::Shl) {
-      APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
-      return BinaryOperator::CreateAnd(X,
-                                       ConstantInt::get(I.getContext(), Mask));
-    }
-  } else if (ShiftAmt1 < ShiftAmt2) {
+  if (ShiftAmt1 < ShiftAmt2) {
     uint32_t ShiftDiff = ShiftAmt2 - ShiftAmt1;
 
     // (X >>?,exact C1) << C2 --> X << (C2-C1)
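Two details in the hunk above do the splat-vector work: the shift amounts arrive as APInt pointers (ShAmt1, COp1) bound by PatternMatch's m_APInt, which matches scalar ConstantInt and splat vector constants alike, and the mask is rebuilt with ConstantInt::get(I.getType(), Mask), which produces a splat constant when the type is a vector. A rough sketch of that matching style in isolation (illustrative only; the function name and structure are invented here, not the patch's code, which lives inside foldShiftByConstOfShiftByConst):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Illustrative only: fold lshr (shl X, C), C --> and X, (-1 >>u C).
static Instruction *foldShlLShrSketch(BinaryOperator &I) {
  Value *X;
  const APInt *C1, *C2;
  // m_APInt binds the constant for both scalars and splat vectors.
  if (!match(&I, m_LShr(m_Shl(m_Value(X), m_APInt(C1)), m_APInt(C2))) ||
      *C1 != *C2)
    return nullptr;

  unsigned BitWidth = C1->getBitWidth();
  if (*C1 == 0 || C1->uge(BitWidth))
    return nullptr; // degenerate shift amounts are handled elsewhere

  APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - C1->getZExtValue());
  // ConstantInt::get(Type, APInt) returns a splat when the type is a vector.
  return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getType(), Mask));
}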
@@ -444,12 +444,12 @@ define i44 @shl_lshr_eq_amt_multi_use(i44 %A) {
   ret i44 %D
 }
 
-; FIXME: Fold vector lshr (shl X, C), C -> and X, C' regardless of the number of uses of the shl.
+; Fold vector lshr (shl X, C), C -> and X, C' regardless of the number of uses of the shl.
 
 define <2 x i44> @shl_lshr_eq_amt_multi_use_splat_vec(<2 x i44> %A) {
 ; CHECK-LABEL: @shl_lshr_eq_amt_multi_use_splat_vec(
 ; CHECK-NEXT: [[B:%.*]] = shl <2 x i44> %A, <i44 33, i44 33>
-; CHECK-NEXT: [[C:%.*]] = lshr exact <2 x i44> [[B]], <i44 33, i44 33>
+; CHECK-NEXT: [[C:%.*]] = and <2 x i44> %A, <i44 2047, i44 2047>
 ; CHECK-NEXT: [[D:%.*]] = or <2 x i44> [[B]], [[C]]
 ; CHECK-NEXT: ret <2 x i44> [[D]]
 ;
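The updated CHECK line follows from the mask arithmetic: for i44 elements and a shift amount of 33, APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1) keeps the low 11 bits, i.e. 2^11 - 1 = 2047 in each lane. A tiny standalone check of that value (illustration only, assuming the LLVM headers are available):

#include <cassert>
#include "llvm/ADT/APInt.h"
using namespace llvm;

int main() {
  // i44 elements, shift amount 33 -> keep the low 11 bits.
  APInt Mask = APInt::getLowBitsSet(/*numBits=*/44, /*loBitsSet=*/44 - 33);
  assert(Mask == 2047);
  return 0;
}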