[InstSimplify] Treat invariant group insts as bitcasts for load operands
We can look through invariant group intrinsics when simplifying the result of a load. Intrinsic calls can't be constants, and we don't want to completely rewrite load constant folding, so instead we rebuild the load's pointer operand as an equivalent constant expression: constant GEPs and bitcasts map directly to their constant-expression counterparts, and invariant group intrinsics are treated as bitcasts.

Reviewed By: lebedev.ri

Differential Revision: https://reviews.llvm.org/D101103
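To make that rebuilding step concrete, here is a minimal sketch (not part of the patch; rebuildLoadOperand and the module lookup of @A are illustrative assumptions mirroring the test updated below) of how the pointer-operand chain bitcast -> strip.invariant.group -> gep -> bitcast over a constant global collapses into a single constant expression once the intrinsic call is treated as a bitcast:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Hypothetical helper, shaped after the @f test below.
static Constant *rebuildLoadOperand(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Constant *C = M.getNamedGlobal("A");                        // @A = constant { i64, i64 }
  C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(Ctx));   // %p = bitcast
  C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(Ctx));   // %a = strip.invariant.group, treated as a bitcast
  Constant *Idx = ConstantInt::get(Type::getInt32Ty(Ctx), 8);
  C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), C, Idx); // %b = gep i8, 8
  // ConstantFoldLoadFromConstPtr on the result can then fold the load to i64 3.
  return ConstantExpr::getBitCast(C, Type::getInt64PtrTy(Ctx));     // %c
}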
@@ -5819,6 +5819,74 @@ Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
   return ::SimplifyFreezeInst(Op0, Q);
 }
 
+static Constant *ConstructLoadOperandConstant(Value *Op) {
+  SmallVector<Value *, 4> Worklist;
+  Worklist.push_back(Op);
+  while (true) {
+    Value *CurOp = Worklist.back();
+    if (isa<Constant>(CurOp))
+      break;
+    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
+      Worklist.push_back(BC->getOperand(0));
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
+        if (!isa<Constant>(GEP->getOperand(I)))
+          return nullptr;
+      }
+      Worklist.push_back(GEP->getOperand(0));
+    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
+      if (II->isLaunderOrStripInvariantGroup())
+        Worklist.push_back(II->getOperand(0));
+      else
+        return nullptr;
+    } else {
+      return nullptr;
+    }
+  }
+
+  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
+  while (!Worklist.empty()) {
+    Value *CurOp = Worklist.pop_back_val();
+    if (isa<BitCastOperator>(CurOp)) {
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      SmallVector<Constant *> Idxs;
+      Idxs.reserve(GEP->getNumOperands() - 1);
+      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
+      }
+      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
+                                             Idxs, GEP->isInBounds(),
+                                             GEP->getInRangeIndex());
+    } else {
+      assert(isa<IntrinsicInst>(CurOp) &&
+             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
+             "expected invariant group intrinsic");
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    }
+  }
+  return NewOp;
+}
+
+static Value *SimplifyLoadInst(LoadInst *LI, const SimplifyQuery &Q) {
+  if (LI->isVolatile())
+    return nullptr;
+
+  if (auto *C = ConstantFoldInstruction(LI, Q.DL))
+    return C;
+
+  // The following only catches more cases than ConstantFoldInstruction() if the
+  // load operand wasn't a constant. Specifically, invariant.group intrinsics.
+  if (isa<Constant>(LI->getPointerOperand()))
+    return nullptr;
+
+  if (auto *C = dyn_cast_or_null<Constant>(
+          ConstructLoadOperandConstant(LI->getPointerOperand())))
+    return ConstantFoldLoadFromConstPtr(C, LI->getType(), Q.DL);
+
+  return nullptr;
+}
+
 /// See if we can compute a simplified version of this instruction.
 /// If not, this returns null.
@@ -5975,6 +6043,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
     // No simplifications for Alloca and it can't be constant folded.
     Result = nullptr;
    break;
+  case Instruction::Load:
+    Result = SimplifyLoadInst(cast<LoadInst>(I), Q);
+    break;
   }
 
   /// If called on unreachable code, the above logic may report that the
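To see the new path end to end, here is a minimal standalone driver (an assumed setup, not part of the patch): it parses IR shaped like the @f test below and hands the load to SimplifyInstruction, which now reaches SimplifyLoadInst. Note that ConstantFoldInstruction() alone would not fold this, because the pointer operand is a chain of instructions rather than a constant, which is exactly the gap ConstructLoadOperandConstant fills.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // IR mirroring the updated @f test: a load through strip.invariant.group
  // of a constant global.
  std::unique_ptr<Module> M = parseAssemblyString(R"IR(
    @A = constant { i64, i64 } { i64 2, i64 3 }
    declare i8* @llvm.strip.invariant.group.p0i8(i8*)
    define i64 @f() {
      %p = bitcast { i64, i64 }* @A to i8*
      %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
      %b = getelementptr i8, i8* %a, i32 8
      %c = bitcast i8* %b to i64*
      %d = load i64, i64* %c
      ret i64 %d
    }
  )IR", Err, Ctx);
  const DataLayout &DL = M->getDataLayout();
  for (Instruction &I : M->getFunction("f")->getEntryBlock())
    if (auto *LI = dyn_cast<LoadInst>(&I))
      if (Value *V = SimplifyInstruction(LI, SimplifyQuery(DL)))
        outs() << *V << "\n"; // expected: i64 3
}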
@@ -9,11 +9,7 @@ declare i8* @llvm.launder.invariant.group.p0i8(i8* %p)
 
 define i64 @f() {
 ; CHECK-LABEL: @f(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
@@ -25,11 +21,7 @@ define i64 @f() {
 
 define i64 @g() {
 ; CHECK-LABEL: @g(
-; CHECK-NEXT:    [[A:%.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* bitcast ({ i64, i64 }* @A to i8*))
-; CHECK-NEXT:    [[B:%.*]] = getelementptr i8, i8* [[A]], i32 8
-; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[B]] to i64*
-; CHECK-NEXT:    [[D:%.*]] = load i64, i64* [[C]], align 4
-; CHECK-NEXT:    ret i64 [[D]]
+; CHECK-NEXT:    ret i64 3
 ;
   %p = bitcast { i64, i64 }* @A to i8*
   %a = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
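For contrast, a sketch of a case the new code deliberately rejects (a hypothetical function @h, substituted into the assumed driver above): a GEP with a non-constant index makes ConstructLoadOperandConstant return nullptr, so SimplifyLoadInst leaves the load alone.

// Hypothetical negative case: %i is not a constant, so the operand chain
// cannot be rebuilt as a constant and SimplifyInstruction returns null.
static const char *NonConstIR = R"IR(
  @A = constant { i64, i64 } { i64 2, i64 3 }
  declare i8* @llvm.strip.invariant.group.p0i8(i8*)
  define i64 @h(i32 %i) {
    %p = bitcast { i64, i64 }* @A to i8*
    %a = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
    %b = getelementptr i8, i8* %a, i32 %i   ; non-constant index blocks the fold
    %c = bitcast i8* %b to i64*
    %d = load i64, i64* %c
    ret i64 %d
  }
)IR";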