[IR] Add WithOverflowInst class
This adds a WithOverflowInst class with a few helper methods to get the underlying binop, signedness and nowrap type and makes use of it where sensible. There will be two more uses in D60650/D60656.

The refactorings are all NFC, though I left some TODOs where things could be improved. In particular we have two places where add/sub are handled but mul isn't.

Differential Revision: https://reviews.llvm.org/D60668

llvm-svn: 358512
parent 0687a4b90a
commit 32304a1b52
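For illustration only (not part of the patch): a minimal sketch of how the new class and its helpers can be queried from a pass. The function name and the diagnostic output are hypothetical; the helper methods themselves are the ones added by this commit.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: report what an <op>.with.overflow call computes.
static void describeOverflowCall(const Value *V) {
  // classof() covers all six {s,u}{add,sub,mul}.with.overflow intrinsics,
  // so a single dyn_cast replaces the usual six-way switch.
  if (const auto *WO = dyn_cast<WithOverflowInst>(V)) {
    errs() << "binop: " << Instruction::getOpcodeName(WO->getBinaryOp())
           << ", signed: " << (WO->isSigned() ? "yes" : "no")
           << ", nowrap kind: "
           << (WO->getNoWrapKind() == OverflowingBinaryOperator::NoSignedWrap
                   ? "nsw" : "nuw")
           << "\n";
    // getLHS()/getRHS() return the two arithmetic operands of the call.
    (void)WO->getLHS();
    (void)WO->getRHS();
  }
}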
@@ -32,6 +32,7 @@ class DataLayout;
class DominatorTree;
class GEPOperator;
class IntrinsicInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
@@ -454,10 +455,10 @@ class Value;
const Instruction *CxtI,
const DominatorTree *DT);

/// Returns true if the arithmetic part of the \p II 's result is
/// Returns true if the arithmetic part of the \p WO 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p II being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
const DominatorTree &DT);

@@ -265,6 +265,39 @@ namespace llvm {
}
};

/// This class represents a op.with.overflow intrinsic.
class WithOverflowInst : public IntrinsicInst {
public:
static bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
return true;
default:
return false;
}
}
static bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}

Value *getLHS() const { return const_cast<Value*>(getArgOperand(0)); }
Value *getRHS() const { return const_cast<Value*>(getArgOperand(1)); }

/// Returns the binary operation underlying the intrinsic.
Instruction::BinaryOps getBinaryOp() const;

/// Whether the intrinsic is signed or unsigned.
bool isSigned() const;

/// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
unsigned getNoWrapKind() const;
};

/// Common base class for all memory intrinsics. Simply provides
/// common methods.
/// Written as CRTP to avoid a common base class amongst the

@@ -4575,52 +4575,21 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
break;

auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
if (!CI)
auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
if (!WO)
break;

if (auto *F = CI->getCalledFunction())
switch (F->getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
return BinaryOp(Instruction::Add, CI->getArgOperand(0),
CI->getArgOperand(1));
Instruction::BinaryOps BinOp = WO->getBinaryOp();
bool Signed = WO->isSigned();
// TODO: Should add nuw/nsw flags for mul as well.
if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

// Now that we know that all uses of the arithmetic-result component of
// CI are guarded by the overflow check, we can go ahead and pretend
// that the arithmetic is non-overflowing.
if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
return BinaryOp(Instruction::Add, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ true,
/* IsNUW = */ false);
else
return BinaryOp(Instruction::Add, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ false,
/* IsNUW*/ true);
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
CI->getArgOperand(1));

// The same reasoning as sadd/uadd above.
if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ true,
/* IsNUW = */ false);
else
return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
CI->getArgOperand(1), /* IsNSW = */ false,
/* IsNUW = */ true);
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
CI->getArgOperand(1));
default:
break;
}
break;
// Now that we know that all uses of the arithmetic-result component of
// CI are guarded by the overflow check, we can go ahead and pretend
// that the arithmetic is non-overflowing.
return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
/* IsNSW = */ Signed, /* IsNUW = */ !Signed);
}

default:

@@ -4206,23 +4206,12 @@ OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}

bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
const DominatorTree &DT) {
#ifndef NDEBUG
auto IID = II->getIntrinsicID();
assert((IID == Intrinsic::sadd_with_overflow ||
IID == Intrinsic::uadd_with_overflow ||
IID == Intrinsic::ssub_with_overflow ||
IID == Intrinsic::usub_with_overflow ||
IID == Intrinsic::smul_with_overflow ||
IID == Intrinsic::umul_with_overflow) &&
"Not an overflow intrinsic!");
#endif

SmallVector<const BranchInst *, 2> GuardingBranches;
SmallVector<const ExtractValueInst *, 2> Results;

for (const User *U : II->users()) {
for (const User *U : WO->users()) {
if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

@@ -21,6 +21,7 @@
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
@@ -168,3 +169,36 @@ bool ConstrainedFPIntrinsic::isTernaryOp() const {
}
}

Instruction::BinaryOps WithOverflowInst::getBinaryOp() const {
switch (getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
return Instruction::Add;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
return Instruction::Sub;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
return Instruction::Mul;
default:
llvm_unreachable("Invalid intrinsic");
}
}

bool WithOverflowInst::isSigned() const {
switch (getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::smul_with_overflow:
return true;
default:
return false;
}
}

unsigned WithOverflowInst::getNoWrapKind() const {
if (isSigned())
return OverflowingBinaryOperator::NoSignedWrap;
else
return OverflowingBinaryOperator::NoUnsignedWrap;
}

@@ -2667,53 +2667,28 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
return ExtractValueInst::Create(IV->getInsertedValueOperand(),
makeArrayRef(exti, exte));
}
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
// We're extracting from an intrinsic, see if we're the only user, which
// allows us to simplify multiple result intrinsics to simpler things that
// just get one value.
if (II->hasOneUse()) {
// Check if we're grabbing the overflow bit or the result of a 'with
// overflow' intrinsic. If it's the latter we can remove the intrinsic
if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) {
// We're extracting from an overflow intrinsic, see if we're the only user,
// which allows us to simplify multiple result intrinsics to simpler
// things that just get one value.
if (WO->hasOneUse()) {
// Check if we're grabbing only the result of a 'with overflow' intrinsic
// and replace it with a traditional binary instruction.
switch (II->getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
}

// If the normal result of the add is dead, and the RHS is a constant,
// we can transform this into a range comparison.
// overflow = uadd a, -4 --> overflow = icmp ugt a, 3
if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
ConstantExpr::getNot(CI));
break;
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateSub(LHS, RHS);
}
break;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateMul(LHS, RHS);
}
break;
default:
break;
if (*EV.idx_begin() == 0) {
Instruction::BinaryOps BinOp = WO->getBinaryOp();
Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
replaceInstUsesWith(*WO, UndefValue::get(WO->getType()));
eraseInstFromFunction(*WO);
return BinaryOperator::Create(BinOp, LHS, RHS);
}

// If the normal result of the add is dead, and the RHS is a constant,
// we can transform this into a range comparison.
// overflow = uadd a, -4 --> overflow = icmp ugt a, 3
if (WO->getIntrinsicID() == Intrinsic::uadd_with_overflow)
if (ConstantInt *CI = dyn_cast<ConstantInt>(WO->getRHS()))
return new ICmpInst(ICmpInst::ICMP_UGT, WO->getLHS(),
ConstantExpr::getNot(CI));
}
}
if (LoadInst *L = dyn_cast<LoadInst>(Agg))

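For illustration only (not part of the patch): a quick standalone check of the identity behind the "overflow = uadd a, -4 --> overflow = icmp ugt a, 3" comment above, namely that an unsigned add of a constant c overflows exactly when the other operand is ugt ~c. Checked exhaustively here for 8-bit values; the program and its output are illustrative.

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t c = static_cast<uint8_t>(-4); // 252, i.e. "uadd a, -4" on i8
  for (unsigned a = 0; a < 256; ++a) {
    bool overflows = a + c > 0xFF;                // wraps past 8 bits
    bool icmp_ugt = a > static_cast<uint8_t>(~c); // a ugt 3
    if (overflows != icmp_ugt)
      std::printf("mismatch at a=%u\n", a);
  }
  std::printf("equivalence holds for all 8-bit values of a\n");
  return 0;
}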
@@ -399,59 +399,38 @@ static bool processSwitch(SwitchInst *SI, LazyValueInfo *LVI,
}

// See if we can prove that the given overflow intrinsic will not overflow.
static bool willNotOverflow(IntrinsicInst *II, LazyValueInfo *LVI) {
using OBO = OverflowingBinaryOperator;
auto NoWrap = [&] (Instruction::BinaryOps BinOp, unsigned NoWrapKind) {
Value *RHS = II->getOperand(1);
ConstantRange RRange = LVI->getConstantRange(RHS, II->getParent(), II);
ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
BinOp, RRange, NoWrapKind);
// As an optimization, do not compute LRange if we do not need it.
if (NWRegion.isEmptySet())
return false;
Value *LHS = II->getOperand(0);
ConstantRange LRange = LVI->getConstantRange(LHS, II->getParent(), II);
return NWRegion.contains(LRange);
};
switch (II->getIntrinsicID()) {
default:
break;
case Intrinsic::uadd_with_overflow:
return NoWrap(Instruction::Add, OBO::NoUnsignedWrap);
case Intrinsic::sadd_with_overflow:
return NoWrap(Instruction::Add, OBO::NoSignedWrap);
case Intrinsic::usub_with_overflow:
return NoWrap(Instruction::Sub, OBO::NoUnsignedWrap);
case Intrinsic::ssub_with_overflow:
return NoWrap(Instruction::Sub, OBO::NoSignedWrap);
}
return false;
static bool willNotOverflow(WithOverflowInst *WO, LazyValueInfo *LVI) {
// TODO: Also support multiplication.
Instruction::BinaryOps BinOp = WO->getBinaryOp();
if (BinOp == Instruction::Mul)
return false;

Value *RHS = WO->getRHS();
ConstantRange RRange = LVI->getConstantRange(RHS, WO->getParent(), WO);
ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
BinOp, RRange, WO->getNoWrapKind());
// As an optimization, do not compute LRange if we do not need it.
if (NWRegion.isEmptySet())
return false;
Value *LHS = WO->getLHS();
ConstantRange LRange = LVI->getConstantRange(LHS, WO->getParent(), WO);
return NWRegion.contains(LRange);
}

static void processOverflowIntrinsic(IntrinsicInst *II) {
IRBuilder<> B(II);
Value *NewOp = nullptr;
switch (II->getIntrinsicID()) {
default:
llvm_unreachable("Unexpected instruction.");
case Intrinsic::uadd_with_overflow:
NewOp = B.CreateNUWAdd(II->getOperand(0), II->getOperand(1), II->getName());
break;
case Intrinsic::sadd_with_overflow:
NewOp = B.CreateNSWAdd(II->getOperand(0), II->getOperand(1), II->getName());
break;
case Intrinsic::usub_with_overflow:
NewOp = B.CreateNUWSub(II->getOperand(0), II->getOperand(1), II->getName());
break;
case Intrinsic::ssub_with_overflow:
NewOp = B.CreateNSWSub(II->getOperand(0), II->getOperand(1), II->getName());
break;
}
static void processOverflowIntrinsic(WithOverflowInst *WO) {
IRBuilder<> B(WO);
Value *NewOp = B.CreateBinOp(
WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), WO->getName());
if (WO->isSigned())
cast<Instruction>(NewOp)->setHasNoSignedWrap();
else
cast<Instruction>(NewOp)->setHasNoUnsignedWrap();

Value *NewI = B.CreateInsertValue(UndefValue::get(WO->getType()), NewOp, 0);
NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(WO->getContext()), 1);
WO->replaceAllUsesWith(NewI);
WO->eraseFromParent();
++NumOverflows;
Value *NewI = B.CreateInsertValue(UndefValue::get(II->getType()), NewOp, 0);
NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(II->getContext()), 1);
II->replaceAllUsesWith(NewI);
II->eraseFromParent();
}

/// Infer nonnull attributes for the arguments at the specified callsite.
@@ -459,9 +438,9 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
SmallVector<unsigned, 4> ArgNos;
unsigned ArgNo = 0;

if (auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
if (willNotOverflow(II, LVI)) {
processOverflowIntrinsic(II);
if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
if (willNotOverflow(WO, LVI)) {
processOverflowIntrinsic(WO);
return true;
}
}

@@ -329,36 +329,15 @@ GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
e.type = EI->getType();
e.opcode = 0;

IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
// EI might be an extract from one of our recognised intrinsics. If it
// is we'll synthesize a semantically equivalent expression instead on
// an extract value expression.
switch (I->getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
e.opcode = Instruction::Add;
break;
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
e.opcode = Instruction::Sub;
break;
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
e.opcode = Instruction::Mul;
break;
default:
break;
}

if (e.opcode != 0) {
// Intrinsic recognized. Grab its args to finish building the expression.
assert(I->getNumArgOperands() == 2 &&
"Expect two args for recognised intrinsics.");
e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
return e;
}
WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
// EI is an extract from one of our with.overflow intrinsics. Synthesize
// a semantically equivalent expression instead of an extract value
// expression.
e.opcode = WO->getBinaryOp();
e.varargs.push_back(lookupOrAdd(WO->getLHS()));
e.varargs.push_back(lookupOrAdd(WO->getRHS()));
return e;
}

// Not a recognised intrinsic. Fall back to producing an extract value

@@ -1814,39 +1814,13 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
const Expression *
NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
auto *II = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
if (II && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
unsigned Opcode = 0;
// EI might be an extract from one of our recognised intrinsics. If it
// is we'll synthesize a semantically equivalent expression instead on
// an extract value expression.
switch (II->getIntrinsicID()) {
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
Opcode = Instruction::Add;
break;
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
Opcode = Instruction::Sub;
break;
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
Opcode = Instruction::Mul;
break;
default:
break;
}

if (Opcode != 0) {
// Intrinsic recognized. Grab its args to finish building the
// expression.
assert(II->getNumArgOperands() == 2 &&
"Expect two args for recognised intrinsics.");
return createBinaryExpression(Opcode, EI->getType(),
II->getArgOperand(0),
II->getArgOperand(1), I);
}
}
auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
// EI is an extract from one of our with.overflow intrinsics. Synthesize
// a semantically equivalent expression instead of an extract value
// expression.
return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
WO->getLHS(), WO->getRHS(), I);
}

return createAggregateValueExpression(I);