//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instructions.h"
#include "LLVMContextImpl.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//

Optional<uint64_t>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
  uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
  if (isArrayAllocation()) {
    auto *C = dyn_cast<ConstantInt>(getArraySize());
    if (!C)
      return None;
    Size *= C->getZExtValue();
  }
  return Size;
}
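// Illustrative usage sketch (AI and DL are placeholders for an AllocaInst*
// and the owning module's DataLayout):
//
//   if (Optional<uint64_t> Bits = AI->getAllocationSizeInBits(DL))
//     errs() << "alloca occupies " << (*Bits / 8) << " bytes\n";
//   else
//     errs() << "array size is not a compile-time constant\n";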
//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
  if (Op1->getType() != Op2->getType())
    return "both values to select must have same type";

  if (Op1->getType()->isTokenTy())
    return "select values cannot have token type";

  if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
    // Vector select.
    if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
      return "vector select condition element type must be i1";
    VectorType *ET = dyn_cast<VectorType>(Op1->getType());
    if (!ET)
      return "selected values for vector select must be vectors";
    if (ET->getElementCount() != VT->getElementCount())
      return "vector select requires selected vectors to have "
             "the same vector length as select condition";
  } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
    return "select condition must be i1 or <n x i1>";
  }
  return nullptr;
}
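// Illustrative usage sketch (Cond, TVal, FVal and InsertPt are placeholder
// values already in scope): validate the operands before building a select
// by hand.
//
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Err);
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);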
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
      ReservedSpace(PN.getNumOperands()) {
  allocHungoffUses(PN.getNumOperands());
  std::copy(PN.op_begin(), PN.op_end(), op_begin());
  std::copy(PN.block_begin(), PN.block_end(), block_begin());
  SubclassOptionalData = PN.SubclassOptionalData;
}

// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
  Value *Removed = getIncomingValue(Idx);

  // Move everything after this operand down.
  //
  // FIXME: we could just swap with the end of the list, then erase. However,
  // clients might not expect this to happen. The code as it is thrashes the
  // use/def lists, which is kinda lame.
  std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
  std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);

  // Nuke the last value.
  Op<-1>().set(nullptr);
  setNumHungOffUseOperands(getNumOperands() - 1);

  // If the PHI node is dead, because it has zero entries, nuke it now.
  if (getNumOperands() == 0 && DeletePHIIfEmpty) {
    // If anyone is using this PHI, make them use a dummy value instead...
    replaceAllUsesWith(UndefValue::get(getType()));
    eraseFromParent();
  }
  return Removed;
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 1.5 times.
///
void PHINode::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2;      // 2 op PHI nodes are VERY common.

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace, /* IsPhi */ true);
}

/// hasConstantValue - If the specified PHI node always merges together the same
/// value, return the value, otherwise return null.
Value *PHINode::hasConstantValue() const {
  // Exploit the fact that phi nodes always have at least one entry.
  Value *ConstantValue = getIncomingValue(0);
  for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
    if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
      if (ConstantValue != this)
        return nullptr; // Incoming values not all the same.
      // The case where the first value is this PHI.
      ConstantValue = getIncomingValue(i);
    }
  if (ConstantValue == this)
    return UndefValue::get(getType());
  return ConstantValue;
}

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}
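// Illustrative usage sketch (PN is a placeholder PHINode*): the classic fold
// for a PHI whose incoming values are all the same.
//
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }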
//===----------------------------------------------------------------------===//
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}

LandingPadInst::LandingPadInst(const LandingPadInst &LP)
    : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
                  LP.getNumOperands()),
      ReservedSpace(LP.getNumOperands()) {
  allocHungoffUses(LP.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = LP.getOperandList();
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];

  setCleanup(LP.isCleanup());
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}

LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(0);
  allocHungoffUses(ReservedSpace);
  setName(NameStr);
  setCleanup(false);
}

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  unsigned e = getNumOperands();
  if (ReservedSpace >= e + Size) return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}

void LandingPadInst::addClause(Constant *Val) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Val;
}
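// Illustrative usage sketch (LPadTy, TypeInfo and IP are placeholders for the
// landingpad result type, a type-info Constant* and an insertion point):
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1, "lpad", IP);
//   LP->addClause(TypeInfo); // catch clause for one exception type
//   LP->setCleanup(false);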
//===----------------------------------------------------------------------===//
// CallBase Implementation
//===----------------------------------------------------------------------===//

CallBase *CallBase::Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                           Instruction *InsertPt) {
  switch (CB->getOpcode()) {
  case Instruction::Call:
    return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
  case Instruction::Invoke:
    return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
  case Instruction::CallBr:
    return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
  default:
    llvm_unreachable("Unknown CallBase sub-class!");
  }
}

Function *CallBase::getCaller() { return getParent()->getParent(); }

unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
  assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
  return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
}

bool CallBase::isIndirectCall() const {
  const Value *V = getCalledOperand();
  if (isa<Function>(V) || isa<Constant>(V))
    return false;
  return !isInlineAsm();
}

/// Tests if this call site must be tail call optimized. Only a CallInst can
/// be tail call optimized.
bool CallBase::isMustTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();
  return false;
}

/// Tests if this call site is marked as a tail call.
bool CallBase::isTailCall() const {
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();
  return false;
}

Intrinsic::ID CallBase::getIntrinsicID() const {
  if (auto *F = getCalledFunction())
    return F->getIntrinsicID();
  return Intrinsic::not_intrinsic;
}

bool CallBase::isReturnNonNull() const {
  if (hasRetAttr(Attribute::NonNull))
    return true;

  if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
      !NullPointerIsDefined(getCaller(),
                            getType()->getPointerAddressSpace()))
    return true;

  return false;
}
Value *CallBase::getReturnedArgOperand() const {
  unsigned Index;

  if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
    return getArgOperand(Index - AttributeList::FirstArgIndex);
  if (const Function *F = getCalledFunction())
    if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      return getArgOperand(Index - AttributeList::FirstArgIndex);

  return nullptr;
}

bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
  if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
    return true;

  // Look at the callee, if available.
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
  return false;
}

/// Determine whether the argument or parameter has the given attribute.
bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
  assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");

  if (Attrs.hasParamAttribute(ArgNo, Kind))
    return true;
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasParamAttribute(ArgNo, Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasFnAttribute(Kind);
  return false;
}

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  if (const Function *F = getCalledFunction())
    return F->getAttributes().hasFnAttribute(Kind);
  return false;
}

void CallBase::getOperandBundlesAsDefs(
    SmallVectorImpl<OperandBundleDef> &Defs) const {
  for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
    Defs.emplace_back(getOperandBundleAt(i));
}
CallBase::op_iterator
CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                     const unsigned BeginIndex) {
  auto It = op_begin() + BeginIndex;
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto *ContextImpl = getContext().pImpl;
  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

  for (auto &BOI : bundle_op_infos()) {
    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;
    BI++;
  }

  assert(BI == Bundles.end() && "Incorrect allocation?");

  return It;
}
CallBase::BundleOpInfo &CallBase::getBundleOpInfoForOperand(unsigned OpIdx) {
  // When there are not many bundles, do a simple linear search. Otherwise,
  // fall back to an interpolated binary search that exploits the fact that
  // bundles usually carry a similar number of arguments to converge faster.
  if (bundle_op_info_end() - bundle_op_info_begin() < 8) {
    for (auto &BOI : bundle_op_infos())
      if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
        return BOI;

    llvm_unreachable("Did not find operand bundle for operand!");
  }

  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
  assert(bundle_op_info_end() - bundle_op_info_begin() > 0 &&
         OpIdx < std::prev(bundle_op_info_end())->End &&
         "The Idx isn't in the operand bundle");

  // We need a fractional value below; to avoid floating point we use an
  // integral value scaled by this constant.
  constexpr unsigned NumberScaling = 1024;

  bundle_op_iterator Begin = bundle_op_info_begin();
  bundle_op_iterator End = bundle_op_info_end();
  bundle_op_iterator Current;

  while (Begin != End) {
    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
    if (Current >= End)
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
      break;
    if (OpIdx >= Current->End)
      Begin = Current + 1;
    else
      End = Current;
  }

  assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
         "the operand bundle doesn't cover every value in the range");
  return *Current;
}
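// Worked illustration (hypothetical numbers): with three bundles covering
// operand ranges [3,5), [5,9) and [9,11), a lookup of OpIdx = 9 computes
// ScaledOperandPerBundle = 1024 * (11 - 3) / 3 = 2730 and probes
// Begin + ((9 - 3) * 1024) / 2730 = Begin + 2, whose range [9,11) already
// contains the index, so the search finishes after a single probe.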
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//

void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");
  setCalledOperand(Func);

#ifndef NDEBUG
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}

void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == 1 && "NumOperands not set up?");
  setCalledOperand(Func);

  assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");

  setName(NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}

CallInst::CallInst(const CallInst &CI)
    : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
               CI.getNumOperands()) {
  setTailCallKind(CI.getTailCallKind());
  setCallingConv(CI.getCallingConv());

  std::copy(CI.op_begin(), CI.op_end(), op_begin());
  std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CI.SubclassOptionalData;
}

CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
                           Instruction *InsertPt) {
  std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());

  auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
                                 Args, OpB, CI->getName(), InsertPt);
  NewCI->setTailCallKind(CI->getTailCallKind());
  NewCI->setCallingConv(CI->getCallingConv());
  NewCI->SubclassOptionalData = CI->SubclassOptionalData;
  NewCI->setAttributes(CI->getAttributes());
  NewCI->setDebugLoc(CI->getDebugLoc());
  return NewCI;
}

CallInst *CallInst::CreateWithReplacedBundle(CallInst *CI, OperandBundleDef OpB,
                                             Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = CI->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return CallInst::Create(CI, OpDefs, InsertPt);
}
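// Illustrative usage sketch (CI is a placeholder CallInst* and DeoptArgs a
// placeholder vector of Value*): replace any existing "deopt" bundle on CI
// with a fresh one, then retire the original call.
//
//   OperandBundleDef DeoptBundle("deopt", DeoptArgs);
//   CallInst *NewCI = CallInst::CreateWithReplacedBundle(CI, DeoptBundle, CI);
//   CI->replaceAllUsesWith(NewCI);
//   CI->eraseFromParent();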
2019-04-22 17:04:51 +00:00
|
|
|
// Update profile weight for call instruction by scaling it using the ratio
|
|
|
|
// of S/T. The meaning of "branch_weights" meta data for call instruction is
|
|
|
|
// transfered to represent call count.
|
|
|
|
void CallInst::updateProfWeight(uint64_t S, uint64_t T) {
|
|
|
|
auto *ProfileData = getMetadata(LLVMContext::MD_prof);
|
|
|
|
if (ProfileData == nullptr)
|
|
|
|
return;
|
|
|
|
|
|
|
|
auto *ProfDataName = dyn_cast<MDString>(ProfileData->getOperand(0));
|
|
|
|
if (!ProfDataName || (!ProfDataName->getString().equals("branch_weights") &&
|
|
|
|
!ProfDataName->getString().equals("VP")))
|
|
|
|
return;
|
|
|
|
|
2019-05-08 03:57:25 +00:00
|
|
|
if (T == 0) {
|
|
|
|
LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
|
|
|
|
"div by 0. Ignoring. Likely the function "
|
|
|
|
<< getParent()->getParent()->getName()
|
|
|
|
<< " has 0 entry count, and contains call instructions "
|
|
|
|
"with non-zero prof info.");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-04-22 17:04:51 +00:00
|
|
|
MDBuilder MDB(getContext());
|
|
|
|
SmallVector<Metadata *, 3> Vals;
|
|
|
|
Vals.push_back(ProfileData->getOperand(0));
|
|
|
|
APInt APS(128, S), APT(128, T);
|
|
|
|
if (ProfDataName->getString().equals("branch_weights") &&
|
|
|
|
ProfileData->getNumOperands() > 0) {
|
|
|
|
// Using APInt::div may be expensive, but most cases should fit 64 bits.
|
|
|
|
APInt Val(128, mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1))
|
|
|
|
->getValue()
|
|
|
|
.getZExtValue());
|
|
|
|
Val *= APS;
|
|
|
|
Vals.push_back(MDB.createConstant(ConstantInt::get(
|
|
|
|
Type::getInt64Ty(getContext()), Val.udiv(APT).getLimitedValue())));
|
|
|
|
} else if (ProfDataName->getString().equals("VP"))
|
|
|
|
for (unsigned i = 1; i < ProfileData->getNumOperands(); i += 2) {
|
|
|
|
// The first value is the key of the value profile, which will not change.
|
|
|
|
Vals.push_back(ProfileData->getOperand(i));
|
|
|
|
// Using APInt::div may be expensive, but most cases should fit 64 bits.
|
|
|
|
APInt Val(128,
|
|
|
|
mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(i + 1))
|
|
|
|
->getValue()
|
|
|
|
.getZExtValue());
|
|
|
|
Val *= APS;
|
|
|
|
Vals.push_back(MDB.createConstant(
|
|
|
|
ConstantInt::get(Type::getInt64Ty(getContext()),
|
|
|
|
Val.udiv(APT).getLimitedValue())));
|
|
|
|
}
|
|
|
|
setMetadata(LLVMContext::MD_prof, MDNode::get(getContext(), Vals));
|
|
|
|
}
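
// Illustrative example (not part of the original file): a call whose
// "branch_weights" count is 2000, cloned into a context expected to execute
// half as often, can be rescaled with updateProfWeight(/*S=*/1, /*T=*/2);
// the stored count becomes 2000 * 1 / 2 = 1000. The same ratio applied to
// "VP" metadata rescales every value-profile count while leaving the
// profiled keys untouched.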

/// IsConstantOne - Return true only if val is constant int 1
static bool IsConstantOne(Value *val) {
  assert(val && "IsConstantOne does not work with nullptr val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
  return CVal && CVal->isOne();
}

static Instruction *createMalloc(Instruction *InsertBefore,
                                 BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                 Type *AllocTy, Value *AllocSize,
                                 Value *ArraySize,
                                 ArrayRef<OperandBundleDef> OpB,
                                 Function *MallocF, const Twine &Name) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createMalloc needs either InsertBefore or InsertAtEnd");

  // malloc(type) becomes:
  //       bitcast (i8* malloc(typeSize)) to type*
  // malloc(type, arraySize) becomes:
  //       bitcast (i8* malloc(typeSize*arraySize)) to type*
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy) {
    if (InsertBefore)
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertBefore);
    else
      ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
                                              "", InsertAtEnd);
  }

  if (!IsConstantOne(ArraySize)) {
    if (IsConstantOne(AllocSize)) {
      AllocSize = ArraySize;         // Operand * 1 = Operand
    } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
      Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
                                                     false /*ZExt*/);
      // Malloc arg is constant product of type size and array size
      AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
    } else {
      // Multiply type size by the array size...
      if (InsertBefore)
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertBefore);
      else
        AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
                                              "mallocsize", InsertAtEnd);
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();
  Type *BPTy = Type::getInt8PtrTy(BB->getContext());
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
  CallInst *MCall = nullptr;
  Instruction *Result = nullptr;
  if (InsertBefore) {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
                             InsertBefore);
    Result = MCall;
    if (Result->getType() != AllocPtrType)
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
  } else {
    MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
    Result = MCall;
    if (Result->getType() != AllocPtrType) {
      InsertAtEnd->getInstList().push_back(MCall);
      // Create a cast instruction to convert to the right type...
      Result = new BitCastInst(MCall, AllocPtrType, Name);
    }
  }
  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    if (!F->returnDoesNotAlias())
      F->setReturnDoesNotAlias();
  }
  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return Result;
}

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}

Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF,
                                    const Twine &Name) {
  return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}
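
// Illustrative sketch (not part of the original file): emitting
// "malloc(16 * sizeof(i64))" in front of an existing instruction. `InsertPt`,
// `DL`, and `Ctx` are hypothetical names for the insertion point, the module
// data layout, and the LLVMContext.
//
//   Type *IntPtrTy = DL.getIntPtrType(Ctx);
//   Type *I64Ty = Type::getInt64Ty(Ctx);
//   Value *ElemSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(I64Ty));
//   Value *NumElems = ConstantInt::get(IntPtrTy, 16);
//   Instruction *Arr = CallInst::CreateMalloc(InsertPt, IntPtrTy, I64Ty,
//                                             ElemSize, NumElems,
//                                             /*MallocF=*/nullptr, "arr");
//
// The returned instruction is the bitcast to i64* (or the malloc call itself
// when no cast is needed) and is already inserted before `InsertPt`.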

/// CreateMalloc - Generate the IR for a call to malloc:
/// 1. Compute the malloc call's argument as the specified type's size,
///    possibly multiplied by the array size if the array size is not
///    constant 1.
/// 2. Call malloc with that argument.
/// 3. Bitcast the result of the malloc call to the specified type.
/// Note: This function does not add the bitcast to the basic block; that is
/// the responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, None, MallocF, Name);
}

Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
                                    Type *IntPtrTy, Type *AllocTy,
                                    Value *AllocSize, Value *ArraySize,
                                    ArrayRef<OperandBundleDef> OpB,
                                    Function *MallocF, const Twine &Name) {
  return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
                      ArraySize, OpB, MallocF, Name);
}

static Instruction *createFree(Value *Source,
                               ArrayRef<OperandBundleDef> Bundles,
                               Instruction *InsertBefore,
                               BasicBlock *InsertAtEnd) {
  assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
         "createFree needs either InsertBefore or InsertAtEnd");
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
  CallInst *Result = nullptr;
  Value *PtrCast = Source;
  if (InsertBefore) {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
  } else {
    if (Source->getType() != IntPtrTy)
      PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
    Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
  }
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}

/// CreateFree - Generate the IR for a call to the builtin free function.
Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
  return createFree(Source, None, InsertBefore, nullptr);
}

Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  Instruction *InsertBefore) {
  return createFree(Source, Bundles, InsertBefore, nullptr);
}

/// CreateFree - Generate the IR for a call to the builtin free function.
/// Note: This function does not add the call to the basic block; that is the
/// responsibility of the caller.
Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}

Instruction *CallInst::CreateFree(Value *Source,
                                  ArrayRef<OperandBundleDef> Bundles,
                                  BasicBlock *InsertAtEnd) {
  Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
  assert(FreeCall && "CreateFree did not create a CallInst");
  return FreeCall;
}
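
// Illustrative sketch (not part of the original file): pairing the allocation
// sketched above with a matching free before the function returns. `Arr` and
// `RetInst` are hypothetical names.
//
//   CallInst::CreateFree(Arr, /*InsertBefore=*/RetInst);
//
// As documented above, the BasicBlock-based overloads return a call that the
// caller must still insert; the Instruction-based overloads insert everything
// at the given point.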

//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
                      BasicBlock *IfException, ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  setNormalDest(IfNormal);
  setUnwindDest(IfException);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Invoking a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Invoking a function with a bad signature!");
#endif

  llvm::copy(Args, op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 3 == op_end() && "Should add up!");

  setName(NameStr);
}

InvokeInst::InvokeInst(const InvokeInst &II)
    : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
               II.getNumOperands()) {
  setCallingConv(II.getCallingConv());
  std::copy(II.op_begin(), II.op_end(), op_begin());
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = II.SubclassOptionalData;
}

InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

  auto *NewII = InvokeInst::Create(
      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());
  return NewII;
}

InvokeInst *InvokeInst::CreateWithReplacedBundle(InvokeInst *II,
                                                 OperandBundleDef OpB,
                                                 Instruction *InsertPt) {
  SmallVector<OperandBundleDef, 2> OpDefs;
  for (unsigned i = 0, e = II->getNumOperandBundles(); i < e; ++i) {
    auto ChildOB = II->getOperandBundleAt(i);
    if (ChildOB.getTagName() != OpB.getTag())
      OpDefs.emplace_back(ChildOB);
  }
  OpDefs.emplace_back(OpB);
  return InvokeInst::Create(II, OpDefs, InsertPt);
}
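
// Illustrative sketch (not part of the original file): re-creating an invoke
// inside an EH funclet by giving it a "funclet" bundle. `OrigInvoke` and
// `FuncletPad` (the token of the enclosing cleanuppad/catchpad) are
// hypothetical names.
//
//   SmallVector<OperandBundleDef, 1> OpBundles;
//   OrigInvoke->getOperandBundlesAsDefs(OpBundles);
//   OpBundles.emplace_back("funclet", FuncletPad);
//   InvokeInst *NewInvoke =
//       InvokeInst::Create(OrigInvoke, OpBundles, /*InsertPt=*/OrigInvoke);
//   OrigInvoke->replaceAllUsesWith(NewInvoke);
//   OrigInvoke->eraseFromParent();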

LandingPadInst *InvokeInst::getLandingPadInst() const {
  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
}

//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
                      ArrayRef<BasicBlock *> IndirectDests,
                      ArrayRef<Value *> Args,
                      ArrayRef<OperandBundleDef> Bundles,
                      const Twine &NameStr) {
  this->FTy = FTy;

  assert((int)getNumOperands() ==
             ComputeNumOperands(Args.size(), IndirectDests.size(),
                                CountBundleInputs(Bundles)) &&
         "NumOperands not set up?");
  NumIndirectDests = IndirectDests.size();
  setDefaultDest(Fallthrough);
  for (unsigned i = 0; i != NumIndirectDests; ++i)
    setIndirectDest(i, IndirectDests[i]);
  setCalledOperand(Fn);

#ifndef NDEBUG
  assert(((Args.size() == FTy->getNumParams()) ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature");

  for (unsigned i = 0, e = Args.size(); i != e; i++)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  std::copy(Args.begin(), Args.end(), op_begin());

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

  setName(NameStr);
}

void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
  assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
  if (BasicBlock *OldBB = getIndirectDest(i)) {
    BlockAddress *Old = BlockAddress::get(OldBB);
    BlockAddress *New = BlockAddress::get(B);
    for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
      if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
        setArgOperand(ArgNo, New);
  }
}
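
// Illustrative note (not part of the original file): this helper runs when an
// indirect destination is retargeted, e.g. via a hypothetical
// `CBI->setIndirectDest(0, NewBB)`. Any blockaddress argument that referred
// to the old destination block is rewritten to refer to `NewBB` first, so the
// callbr's argument list and its indirect label list stay consistent.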

CallBrInst::CallBrInst(const CallBrInst &CBI)
    : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
               CBI.getNumOperands()) {
  setCallingConv(CBI.getCallingConv());
  std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
  std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
            bundle_op_info_begin());
  SubclassOptionalData = CBI.SubclassOptionalData;
  NumIndirectDests = CBI.NumIndirectDests;
}

CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
                               Instruction *InsertPt) {
  std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());

  auto *NewCBI = CallBrInst::Create(
      CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
      CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
  NewCBI->setCallingConv(CBI->getCallingConv());
  NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
  NewCBI->setAttributes(CBI->getAttributes());
  NewCBI->setDebugLoc(CBI->getDebugLoc());
  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
  return NewCBI;
}

//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

ReturnInst::ReturnInst(const ReturnInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
                  RI.getNumOperands()) {
  if (RI.getNumOperands())
    Op<0>() = RI.Op<0>();
  SubclassOptionalData = RI.SubclassOptionalData;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}

ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}

ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}

ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}

//===----------------------------------------------------------------------===//
//                        CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
    : Instruction(CRI.getType(), Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) -
                      CRI.getNumOperands(),
                  CRI.getNumOperands()) {
  setSubclassData<Instruction::OpaqueField>(
      CRI.getSubclassData<Instruction::OpaqueField>());
  Op<0>() = CRI.Op<0>();
  if (CRI.hasUnwindDest())
    Op<1>() = CRI.Op<1>();
}

void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}
|
|
|
|
|
[IR] Reformulate LLVM's EH funclet IR
While we have successfully implemented a funclet-oriented EH scheme on
top of LLVM IR, our scheme has some notable deficiencies:
- catchendpad and cleanupendpad are necessary in the current design
but they are difficult to explain to others, even to seasoned LLVM
experts.
- catchendpad and cleanupendpad are optimization barriers. They cannot
be split and force all potentially throwing call-sites to be invokes.
This has a noticable effect on the quality of our code generation.
- catchpad, while similar in some aspects to invoke, is fairly awkward.
It is unsplittable, starts a funclet, and has control flow to other
funclets.
- The nesting relationship between funclets is currently a property of
control flow edges. Because of this, we are forced to carefully
analyze the flow graph to see if there might potentially exist illegal
nesting among funclets. While we have logic to clone funclets when
they are illegally nested, it would be nicer if we had a
representation which forbade them upfront.
Let's clean this up a bit by doing the following:
- Instead, make catchpad more like cleanuppad and landingpad: no control
flow, just a bunch of simple operands; catchpad would be splittable.
- Introduce catchswitch, a control flow instruction designed to model
the constraints of funclet oriented EH.
- Make funclet scoping explicit by having funclet instructions consume
the token produced by the funclet which contains them.
- Remove catchendpad and cleanupendpad. Their presence can be inferred
implicitly using coloring information.
N.B. The state numbering code for the CLR has been updated but the
veracity of it's output cannot be spoken for. An expert should take a
look to make sure the results are reasonable.
Reviewers: rnk, JosephTremoulet, andrew.w.kaylor
Differential Revision: http://reviews.llvm.org/D15139
llvm-svn: 255422
2015-12-12 05:38:55 +00:00
|
|
|
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
|
|
|
|
unsigned Values, Instruction *InsertBefore)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(CleanupPad->getContext()),
|
|
|
|
Instruction::CleanupRet,
|
|
|
|
OperandTraits<CleanupReturnInst>::op_end(this) - Values,
|
|
|
|
Values, InsertBefore) {
|
2015-08-23 00:26:33 +00:00
|
|
|
init(CleanupPad, UnwindBB);
|
2015-07-31 17:58:14 +00:00
|
|
|
}

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
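
// Note on the constructors above: a cleanupret's operands are the cleanuppad
// token plus, optionally, the unwind destination block, which is why callers
// pass Values as either 1 or 2 (an observation inferred from init() and the
// operand counts used here, not a normative statement).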

//===----------------------------------------------------------------------===//
// CatchReturnInst Implementation
//===----------------------------------------------------------------------===//

void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}

CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
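
// As init() above shows, a catchret always carries exactly two operands: the
// catchpad token in Op<0>() and the successor block in Op<1>().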

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}

CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}

//===----------------------------------------------------------------------===//
// CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
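
// The "+ 1" above reserves a slot for the parent pad operand; when an unwind
// destination is present, NumReservedValues is bumped once more so init() can
// record that destination as well.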

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}

CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}

void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
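
// CatchSwitchInst keeps its operands in hung-off uses (storage allocated
// separately from the instruction itself) because the handler list can keep
// growing after construction via addHandler().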

/// growOperands - This grows the operand list in response to a push_back
/// style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  if (ReservedSpace >= NumOperands + Size)
    return;
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
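
// The doubling formula above over-allocates ReservedSpace so that a sequence
// of addHandler() calls only reallocates the hung-off uses occasionally; this
// is an observation about the growth policy, not a documented guarantee.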

void CatchSwitchInst::addHandler(BasicBlock *Handler) {
  unsigned OpNo = getNumOperands();
  growOperands(1);
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(getNumOperands() + 1);
  getOperandList()[OpNo] = Handler;
}

void CatchSwitchInst::removeHandler(handler_iterator HI) {
  // Move all subsequent handlers up one.
  Use *EndDst = op_end() - 1;
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.
  *EndDst = nullptr;

  setNumHungOffUseOperands(getNumOperands() - 1);
}
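
// removeHandler() shifts the remaining handler uses down one slot, clears the
// vacated last slot, and shrinks the operand count; ReservedSpace itself is
// left untouched, so a later addHandler() can reuse the freed slot.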

//===----------------------------------------------------------------------===//
// FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}

FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
    : Instruction(FPI.getType(), FPI.getOpcode(),
                  OperandTraits<FuncletPadInst>::op_end(this) -
                      FPI.getNumOperands(),
                  FPI.getNumOperands()) {
  std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
  setParentPad(FPI.getParentPad());
}
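
// Operand layout for funclet pads (as set up by init() above): the funclet
// arguments occupy the leading operand slots, and the parent pad token appears
// to be kept in the trailing slot via setParentPad() -- hence the
// "1 + Args.size()" assert.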

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}

FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}

//===----------------------------------------------------------------------===//
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//

UnreachableInst::UnreachableInst(LLVMContext &Context,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertBefore) {}

UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
                  0, InsertAtEnd) {}

//===----------------------------------------------------------------------===//
// BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
  if (isConditional())
    assert(getCondition()->getType()->isIntegerTy(1) &&
           "May only branch on boolean predicates!");
}

BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}
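
// Note the reversed operand layout used above: the condition (when present)
// comes first in memory and the "true" destination is stored last, so the
// successors are always read from the end of the operand list via Op<-1>()
// and Op<-2>().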

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}

BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  Op<-1>() = IfTrue;
  Op<-2>() = IfFalse;
  Op<-3>() = Cond;
#ifndef NDEBUG
  AssertOK();
#endif
}

BranchInst::BranchInst(const BranchInst &BI)
    : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
                  BI.getNumOperands()) {
  Op<-1>() = BI.Op<-1>();
  if (BI.getNumOperands() != 1) {
    assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
    Op<-3>() = BI.Op<-3>();
    Op<-2>() = BI.Op<-2>();
  }
  SubclassOptionalData = BI.SubclassOptionalData;
}

void BranchInst::swapSuccessors() {
  assert(isConditional() &&
         "Cannot swap successors of an unconditional branch");
  Op<-1>().swap(Op<-2>());

  // Update profile metadata if present and it matches our structural
  // expectations.
  swapProfMetadata();
}
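
// swapSuccessors() only exchanges the two destinations (and the attached
// branch-weight metadata); callers that want to preserve semantics must also
// invert the branch condition themselves.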

//===----------------------------------------------------------------------===//
// AllocaInst Implementation
//===----------------------------------------------------------------------===//

static Value *getAISize(LLVMContext &Context, Value *Amt) {
  if (!Amt)
    Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
  else {
    assert(!isa<BasicBlock>(Amt) &&
           "Passed basic block into allocation size parameter! Use other ctor");
    assert(Amt->getType()->isIntegerTy() &&
           "Allocation array size is not an integer!");
  }
  return Amt;
}

static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getPrefTypeAlign(Ty);
}
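
// Note the asymmetry with loads and stores below: an alloca with no explicit
// alignment defaults to the preferred alignment for its type, while loads and
// stores default to the ABI alignment (see computeLoadStoreDefaultAlign).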

static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeAllocaDefaultAlign(Ty, I->getParent());
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name,
                       Instruction *InsertBefore)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertBefore),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
    : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
                       getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
      AllocatedType(Ty) {
  setAlignment(Align);
  assert(!Ty->isVoidTy() && "Cannot allocate void!");
  setName(Name);
}

bool AllocaInst::isArrayAllocation() const {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
    return !CI->isOne();
  return true;
}
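
// Only a constant array size of exactly 1 makes this return false; any other
// constant, and any runtime value, is treated as an array allocation.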

/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size. If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
bool AllocaInst::isStaticAlloca() const {
  // Must be constant size.
  if (!isa<ConstantInt>(getArraySize())) return false;

  // Must be in the entry block.
  const BasicBlock *Parent = getParent();
  return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
}
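
// Illustrative IR (hypothetical values, syntax approximate for this version):
//   entry:
//     %buf = alloca [16 x i8]     ; static: constant size, in the entry block
//     ...
//     %vla = alloca i8, i32 %n    ; not static: size only known at runtime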

//===----------------------------------------------------------------------===//
// LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type.");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic load");
}
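
// As the assert above encodes, an atomic load must always carry an explicit
// nonzero alignment; ordinary loads may fall back to the defaults computed by
// computeLoadStoreDefaultAlign() below.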

static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
  assert(BB && "Insertion BB cannot be null when alignment not provided!");
  assert(BB->getParent() &&
         "BB must be in a Function when alignment not provided!");
  const DataLayout &DL = BB->getModule()->getDataLayout();
  return DL.getABITypeAlign(Ty);
}

static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
  assert(I && "Insertion position cannot be null when alignment not provided!");
  return computeLoadStoreDefaultAlign(Ty, I->getParent());
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   Instruction *InsertBef)
    : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                   BasicBlock *InsertAE)
    : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
  assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
  setVolatile(isVolatile);
  setAlignment(Align);
  setAtomic(Order, SSID);
  AssertOK();
  setName(Name);
}

//===----------------------------------------------------------------------===//
// StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
  assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
  assert(getOperand(1)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(0)->getType() ==
         cast<PointerType>(getOperand(1)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(!(isAtomic() && getAlignment() == 0) &&
         "Alignment required for atomic store");
}

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}
2011-08-09 23:02:53 +00:00
|
|
|
|
2020-05-14 14:48:10 -07:00
|
|
|
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
|
2019-10-22 12:55:32 +00:00
|
|
|
AtomicOrdering Order, SyncScope::ID SSID,
|
2011-08-09 23:02:53 +00:00
|
|
|
Instruction *InsertBefore)
|
2019-10-22 12:55:32 +00:00
|
|
|
: Instruction(Type::getVoidTy(val->getContext()), Store,
|
|
|
|
OperandTraits<StoreInst>::op_begin(this),
|
|
|
|
OperandTraits<StoreInst>::operands(this), InsertBefore) {
|
2011-08-09 23:02:53 +00:00
|
|
|
Op<0>() = val;
|
|
|
|
Op<1>() = addr;
|
|
|
|
setVolatile(isVolatile);
|
2019-10-22 12:55:32 +00:00
|
|
|
setAlignment(Align);
|
2017-07-11 22:23:00 +00:00
|
|
|
setAtomic(Order, SSID);
|
2011-08-09 23:02:53 +00:00
|
|
|
AssertOK();
|
|
|
|
}
|
|
|
|
|
2020-05-14 14:48:10 -07:00
|
|
|
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
|
2019-10-22 12:55:32 +00:00
|
|
|
AtomicOrdering Order, SyncScope::ID SSID,
|
2005-01-29 00:35:16 +00:00
|
|
|
BasicBlock *InsertAtEnd)
|
2019-10-22 12:55:32 +00:00
|
|
|
: Instruction(Type::getVoidTy(val->getContext()), Store,
|
|
|
|
OperandTraits<StoreInst>::op_begin(this),
|
|
|
|
OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
|
2008-05-26 21:33:52 +00:00
|
|
|
Op<0>() = val;
|
|
|
|
Op<1>() = addr;
|
2005-02-05 01:38:38 +00:00
|
|
|
setVolatile(isVolatile);
|
2019-10-22 12:55:32 +00:00
|
|
|
setAlignment(Align);
|
2017-07-11 22:23:00 +00:00
|
|
|
setAtomic(Order, SSID);
|
2005-01-29 00:35:16 +00:00
|
|
|
AssertOK();
|
2004-07-29 12:33:25 +00:00
|
|
|
}
//===----------------------------------------------------------------------===//
//                       AtomicCmpXchgInst Implementation
//===----------------------------------------------------------------------===//

void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
                             Align Alignment, AtomicOrdering SuccessOrdering,
                             AtomicOrdering FailureOrdering,
                             SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Cmp;
  Op<2>() = NewVal;
  setSuccessOrdering(SuccessOrdering);
  setFailureOrdering(FailureOrdering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) && getOperand(2) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Cmp type!");
  assert(getOperand(2)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to NewVal type!");
  assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(FailureOrdering != AtomicOrdering::NotAtomic &&
         "AtomicCmpXchg instructions must be atomic!");
  assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
         "AtomicCmpXchg failure argument shall be no stronger than the success "
         "argument");
  assert(FailureOrdering != AtomicOrdering::Release &&
         FailureOrdering != AtomicOrdering::AcquireRelease &&
         "AtomicCmpXchg failure ordering cannot include release semantics");
}

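// Illustrative IR (an assumed example, not taken from a test): the assertions
// above accept
//   %pair = cmpxchg i32* %ptr, i32 %old, i32 %new acq_rel acquire
// but reject a failure ordering of release or acq_rel, and reject a failure
// ordering stronger than the success ordering.
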
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     Instruction *InsertBefore)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
                                     Align Alignment,
                                     AtomicOrdering SuccessOrdering,
                                     AtomicOrdering FailureOrdering,
                                     SyncScope::ID SSID,
                                     BasicBlock *InsertAtEnd)
    : Instruction(
          StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
          AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
          OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
  Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}

//===----------------------------------------------------------------------===//
//                       AtomicRMWInst Implementation
//===----------------------------------------------------------------------===//

void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
                         Align Alignment, AtomicOrdering Ordering,
                         SyncScope::ID SSID) {
  Op<0>() = Ptr;
  Op<1>() = Val;
  setOperation(Operation);
  setOrdering(Ordering);
  setSyncScopeID(SSID);
  setAlignment(Alignment);

  assert(getOperand(0) && getOperand(1) &&
         "All operands must be non-null!");
  assert(getOperand(0)->getType()->isPointerTy() &&
         "Ptr must have pointer type!");
  assert(getOperand(1)->getType() ==
         cast<PointerType>(getOperand(0)->getType())->getElementType()
         && "Ptr must be a pointer to Val type!");
  assert(Ordering != AtomicOrdering::NotAtomic &&
         "AtomicRMW instructions must be atomic!");
}

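// Illustrative IR (an assumed example): the checks above correspond to textual
// IR such as
//   %old = atomicrmw add i32* %ptr, i32 1 seq_cst
// where the value operand's type matches the pointee type of the pointer
// operand.
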
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, Instruction *InsertBefore)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
                             Align Alignment, AtomicOrdering Ordering,
                             SyncScope::ID SSID, BasicBlock *InsertAtEnd)
    : Instruction(Val->getType(), AtomicRMW,
                  OperandTraits<AtomicRMWInst>::op_begin(this),
                  OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
  Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}

StringRef AtomicRMWInst::getOperationName(BinOp Op) {
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return "xchg";
  case AtomicRMWInst::Add:
    return "add";
  case AtomicRMWInst::Sub:
    return "sub";
  case AtomicRMWInst::And:
    return "and";
  case AtomicRMWInst::Nand:
    return "nand";
  case AtomicRMWInst::Or:
    return "or";
  case AtomicRMWInst::Xor:
    return "xor";
  case AtomicRMWInst::Max:
    return "max";
  case AtomicRMWInst::Min:
    return "min";
  case AtomicRMWInst::UMax:
    return "umax";
  case AtomicRMWInst::UMin:
    return "umin";
  case AtomicRMWInst::FAdd:
    return "fadd";
  case AtomicRMWInst::FSub:
    return "fsub";
  case AtomicRMWInst::BAD_BINOP:
    return "<invalid operation>";
  }

  llvm_unreachable("invalid atomicrmw operation");
}

//===----------------------------------------------------------------------===//
//                       FenceInst Implementation
//===----------------------------------------------------------------------===//

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
                     SyncScope::ID SSID,
                     BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
  setOrdering(Ordering);
  setSyncScopeID(SSID);
}

//===----------------------------------------------------------------------===//
//                       GetElementPtrInst Implementation
//===----------------------------------------------------------------------===//

void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
                             const Twine &Name) {
  assert(getNumOperands() == 1 + IdxList.size() &&
         "NumOperands not initialized?");
  Op<0>() = Ptr;
  llvm::copy(IdxList, op_begin() + 1);
  setName(Name);
}

GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
    : Instruction(GEPI.getType(), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) -
                      GEPI.getNumOperands(),
                  GEPI.getNumOperands()),
      SourceElementType(GEPI.SourceElementType),
      ResultElementType(GEPI.ResultElementType) {
  std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
  SubclassOptionalData = GEPI.SubclassOptionalData;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, Value *Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (!Struct->indexValid(Idx))
      return nullptr;
    return Struct->getTypeAtIndex(Idx);
  }
  if (!Idx->getType()->isIntOrIntVectorTy())
    return nullptr;
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

Type *GetElementPtrInst::getTypeAtIndex(Type *Ty, uint64_t Idx) {
  if (auto *Struct = dyn_cast<StructType>(Ty)) {
    if (Idx >= Struct->getNumElements())
      return nullptr;
    return Struct->getElementType(Idx);
  }
  if (auto *Array = dyn_cast<ArrayType>(Ty))
    return Array->getElementType();
  if (auto *Vector = dyn_cast<VectorType>(Ty))
    return Vector->getElementType();
  return nullptr;
}

template <typename IndexTy>
static Type *getIndexedTypeInternal(Type *Ty, ArrayRef<IndexTy> IdxList) {
  if (IdxList.empty())
    return Ty;
  for (IndexTy V : IdxList.slice(1)) {
    Ty = GetElementPtrInst::getTypeAtIndex(Ty, V);
    if (!Ty)
      return Ty;
  }
  return Ty;
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty,
                                        ArrayRef<Constant *> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
  return getIndexedTypeInternal(Ty, IdxList);
}

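// Worked example (illustrative): with Ty = { i32, [4 x float] },
// getIndexedType(Ty, {0, 1, 2}) yields float. getIndexedTypeInternal skips the
// first index (it only steps over the pointer operand of a GEP); index 1 then
// selects the array field and index 2 selects one of its float elements.
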
/// hasAllZeroIndices - Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
      if (!CI->isZero()) return false;
    } else {
      return false;
    }
  }
  return true;
}

/// hasAllConstantIndices - Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool GetElementPtrInst::hasAllConstantIndices() const {
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    if (!isa<ConstantInt>(getOperand(i)))
      return false;
  }
  return true;
}

void GetElementPtrInst::setIsInBounds(bool B) {
  cast<GEPOperator>(this)->setIsInBounds(B);
}

bool GetElementPtrInst::isInBounds() const {
  return cast<GEPOperator>(this)->isInBounds();
}

bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
                                                 APInt &Offset) const {
  // Delegate to the generic GEPOperator implementation.
  return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
}

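// Illustrative example: in
//   %f = getelementptr { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
// every index is zero, so hasAllZeroIndices() is true and the result has the
// same value as %p (only the pointee type differs). hasAllConstantIndices() is
// also true, so accumulateConstantOffset() can fold the GEP to a fixed byte
// offset (0 here).
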
//===----------------------------------------------------------------------===//
//                       ExtractElementInst Implementation
//===----------------------------------------------------------------------===//

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       Instruction *InsertBef)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertBef) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");
  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
                                       const Twine &Name,
                                       BasicBlock *InsertAE)
  : Instruction(cast<VectorType>(Val->getType())->getElementType(),
                ExtractElement,
                OperandTraits<ExtractElementInst>::op_begin(this),
                2, InsertAE) {
  assert(isValidOperands(Val, Index) &&
         "Invalid extractelement instruction operands!");

  Op<0>() = Val;
  Op<1>() = Index;
  setName(Name);
}

bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
  if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
    return false;
  return true;
}

//===----------------------------------------------------------------------===//
//                       InsertElementInst Implementation
//===----------------------------------------------------------------------===//

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     Instruction *InsertBef)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertBef) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");
  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
                                     const Twine &Name,
                                     BasicBlock *InsertAE)
  : Instruction(Vec->getType(), InsertElement,
                OperandTraits<InsertElementInst>::op_begin(this),
                3, InsertAE) {
  assert(isValidOperands(Vec, Elt, Index) &&
         "Invalid insertelement instruction operands!");

  Op<0>() = Vec;
  Op<1>() = Elt;
  Op<2>() = Index;
  setName(Name);
}

bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
                                        const Value *Index) {
  if (!Vec->getType()->isVectorTy())
    return false; // First operand of insertelement must be vector type.

  if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
    return false; // Second operand of insertelement must be vector element type.

  if (!Index->getType()->isIntegerTy())
    return false; // Third operand of insertelement must be an integer.
  return true;
}

//===----------------------------------------------------------------------===//
//                      ShuffleVectorInst Implementation
//===----------------------------------------------------------------------===//

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          cast<VectorType>(Mask->getType())->getElementCount()),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  SmallVector<int, 16> MaskArr;
  getShuffleMask(cast<Constant>(Mask), MaskArr);
  setShuffleMask(MaskArr);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name,
                                     Instruction *InsertBefore)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");
  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                                     const Twine &Name, BasicBlock *InsertAtEnd)
    : Instruction(
          VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
                          Mask.size(), isa<ScalableVectorType>(V1->getType())),
          ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
          OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
  assert(isValidOperands(V1, V2, Mask) &&
         "Invalid shuffle vector instruction operands!");

  Op<0>() = V1;
  Op<1>() = V2;
  setShuffleMask(Mask);
  setName(Name);
}

void ShuffleVectorInst::commute() {
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = ShuffleMask.size();
  SmallVector<int, 16> NewMask(NumMaskElts);
  for (int i = 0; i != NumMaskElts; ++i) {
    int MaskElt = getMaskValue(i);
    if (MaskElt == UndefMaskElem) {
      NewMask[i] = UndefMaskElem;
      continue;
    }
    assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
    MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
    NewMask[i] = MaskElt;
  }
  setShuffleMask(NewMask);
  Op<0>().swap(Op<1>());
}

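// Illustrative example: with two 4-element operands, commuting a shuffle whose
// mask is <0, 5, 2, 7> swaps the operands and rewrites the mask to
// <4, 1, 6, 3>, which selects the same lanes from the swapped sources.
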
bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        ArrayRef<int> Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
    return false;

  // Make sure the mask elements make sense.
  int V1Size =
      cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
  for (int Elem : Mask)
    if (Elem != UndefMaskElem && Elem >= V1Size * 2)
      return false;

  if (isa<ScalableVectorType>(V1->getType()))
    if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask))
      return false;

  return true;
}

bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
                                        const Value *Mask) {
  // V1 and V2 must be vectors of the same type.
  if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
    return false;

  // Mask must be a vector of i32, and must be the same kind of vector as the
  // input vectors.
  auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
  if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
      isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
    return false;

  // Check to see if Mask is valid.
  if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
    return true;

  if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (Value *Op : MV->operands()) {
      if (auto *CI = dyn_cast<ConstantInt>(Op)) {
        if (CI->uge(V1Size*2))
          return false;
      } else if (!isa<UndefValue>(Op)) {
        return false;
      }
    }
    return true;
  }

  if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
    for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
         i != e; ++i)
      if (CDS->getElementAsInteger(i) >= V1Size*2)
        return false;
    return true;
  }

  return false;
}

void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
                                       SmallVectorImpl<int> &Result) {
  ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

  if (isa<ConstantAggregateZero>(Mask)) {
    Result.resize(EC.getKnownMinValue(), 0);
    return;
  }

  Result.reserve(EC.getKnownMinValue());

  if (EC.isScalable()) {
    assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
           "Scalable vector shuffle mask must be undef or zeroinitializer");
    int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
    for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
      Result.emplace_back(MaskVal);
    return;
  }

  unsigned NumElts = EC.getKnownMinValue();

  if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
    for (unsigned i = 0; i != NumElts; ++i)
      Result.push_back(CDS->getElementAsInteger(i));
    return;
  }
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = Mask->getAggregateElement(i);
    Result.push_back(isa<UndefValue>(C) ? -1 :
                     cast<ConstantInt>(C)->getZExtValue());
  }
}

void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
  ShuffleMask.assign(Mask.begin(), Mask.end());
  ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
}

Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                          Type *ResultTy) {
  Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
  if (isa<ScalableVectorType>(ResultTy)) {
    assert(is_splat(Mask) && "Unexpected shuffle");
    Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
    if (Mask[0] == 0)
      return Constant::getNullValue(VecTy);
    return UndefValue::get(VecTy);
  }
  SmallVector<Constant *, 16> MaskConst;
  for (int Elem : Mask) {
    if (Elem == UndefMaskElem)
      MaskConst.push_back(UndefValue::get(Int32Ty));
    else
      MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
  }
  return ConstantVector::get(MaskConst);
}

static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  assert(!Mask.empty() && "Shuffle mask must contain elements");
  bool UsesLHS = false;
  bool UsesRHS = false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
           "Out-of-bounds shuffle mask element");
    UsesLHS |= (Mask[i] < NumOpElts);
    UsesRHS |= (Mask[i] >= NumOpElts);
    if (UsesLHS && UsesRHS)
      return false;
  }
  // Allow for degenerate case: completely undef mask means neither source is used.
  return UsesLHS || UsesRHS;
}

bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isSingleSourceMaskImpl(Mask, Mask.size());
}

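// Illustrative examples: <2, 0, 3, 1> only references lanes of the first
// source, so isSingleSourceMask() returns true; <0, 5, 2, 7> references lanes
// from both sources and returns false.
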
static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
  if (!isSingleSourceMaskImpl(Mask, NumOpElts))
    return false;
  for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumOpElts + i))
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
  // We don't have vector operand size information, so assume operands are the
  // same size as the mask.
  return isIdentityMaskImpl(Mask, Mask.size());
}

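// Illustrative examples: <0, 1, 2, 3> and <4, 5, 6, 7> are both identity masks
// (the latter is an identity of the second source); <1, 0, 3, 2> is not.
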
bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
      return false;
  }
  return true;
}

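// Illustrative example: <3, 2, 1, 0> reverses a 4-element vector, so
// isReverseMask() returns true; undef lanes (encoded as -1), e.g.
// <3, -1, 1, 0>, are tolerated.
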
bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
  if (!isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != 0 && Mask[i] != NumElts)
      return false;
  }
  return true;
}

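// Illustrative examples: <0, 0, 0, 0> splats lane 0 of the first source and
// <4, 4, 4, 4> splats lane 0 of the second source; both return true here.
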
bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
  // Select is differentiated from identity. It requires using both sources.
  if (isSingleSourceMask(Mask))
    return false;
  for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
    if (Mask[i] == -1)
      continue;
    if (Mask[i] != i && Mask[i] != (NumElts + i))
      return false;
  }
  return true;
}

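// Illustrative example: <0, 5, 2, 7> keeps every lane in place while choosing
// it from one of the two sources, so isSelectMask() returns true.
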
bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
  // Example masks that will return true:
  // v1 = <a, b, c, d>
  // v2 = <e, f, g, h>
  // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
  // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>

  // 1. The number of elements in the mask must be a power-of-2 and at least 2.
  int NumElts = Mask.size();
  if (NumElts < 2 || !isPowerOf2_32(NumElts))
    return false;

  // 2. The first element of the mask must be either a 0 or a 1.
  if (Mask[0] != 0 && Mask[0] != 1)
    return false;

  // 3. The difference between the first 2 elements must be equal to the
  // number of elements in the mask.
  if ((Mask[1] - Mask[0]) != NumElts)
    return false;

  // 4. The difference between consecutive even-numbered and odd-numbered
  // elements must be equal to 2.
  for (int i = 2; i < NumElts; ++i) {
    int MaskEltVal = Mask[i];
    if (MaskEltVal == -1)
      return false;
    int MaskEltPrevVal = Mask[i - 2];
    if (MaskEltVal - MaskEltPrevVal != 2)
      return false;
  }
  return true;
}

bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
                                               int NumSrcElts, int &Index) {
  // Must extract from a single source.
  if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
    return false;

  // Must be smaller (else this is an Identity shuffle).
  if (NumSrcElts <= (int)Mask.size())
    return false;

  // Find start of extraction, accounting that we may start with an UNDEF.
  int SubIndex = -1;
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int Offset = (M % NumSrcElts) - i;
    if (0 <= SubIndex && SubIndex != Offset)
      return false;
    SubIndex = Offset;
  }

  if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
    Index = SubIndex;
    return true;
  }
  return false;
}

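// Worked example (illustrative): with NumSrcElts == 4, the mask <2, 3> reads
// the upper half of the source, so this returns true and sets Index to 2.
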
bool ShuffleVectorInst::isIdentityWithPadding() const {
  if (isa<UndefValue>(Op<2>()))
    return false;
  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts <= NumOpElts)
    return false;

  // The first part of the mask must choose elements from exactly 1 source op.
  ArrayRef<int> Mask = getShuffleMask();
  if (!isIdentityMaskImpl(Mask, NumOpElts))
    return false;

  // All extending must be with undef elements.
  for (int i = NumOpElts; i < NumMaskElts; ++i)
    if (Mask[i] != -1)
      return false;

  return true;
}

bool ShuffleVectorInst::isIdentityWithExtract() const {
  if (isa<UndefValue>(Op<2>()))
    return false;

  // FIXME: Not currently possible to express a shuffle mask for a scalable
  // vector for this case
  if (isa<ScalableVectorType>(getType()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts >= NumOpElts)
    return false;

  return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
}

bool ShuffleVectorInst::isConcat() const {
  // Vector concatenation is differentiated from identity with padding.
  if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()) ||
      isa<UndefValue>(Op<2>()))
    return false;

  int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
  if (NumMaskElts != NumOpElts * 2)
    return false;

  // Use the mask length rather than the operands' vector lengths here. We
  // already know that the shuffle returns a vector twice as long as the inputs,
  // and neither of the inputs are undef vectors. If the mask picks consecutive
  // elements from both inputs, then this is a concatenation of the inputs.
  return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
}

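// Illustrative example (assuming fixed-width operands and a defined mask
// operand): shuffling two <2 x i32> values with mask <0, 1, 2, 3> produces
// their concatenation, so isConcat() returns true, whereas <0, 1, -1, -1>
// matches the identity-with-padding pattern above instead.
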
//===----------------------------------------------------------------------===//
//                             InsertValueInst Class
//===----------------------------------------------------------------------===//

void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
  SubclassOptionalData = IVI.SubclassOptionalData;
}

//===----------------------------------------------------------------------===//
//                             ExtractValueInst Class
//===----------------------------------------------------------------------===//

void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}

ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
}

// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
//
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type*>(Agg);
}

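// Worked example (illustrative): for Agg = { i32, { float, i8 } },
// getIndexedType(Agg, {1, 0}) returns float. Unlike the GEP helper earlier in
// this file, no leading pointer index is skipped; every index selects directly
// into the aggregate.
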
//===----------------------------------------------------------------------===//
//                             UnaryOperator Class
//===----------------------------------------------------------------------===//

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
#ifndef NDEBUG
  switch (getOpcode()) {
  case FNeg:
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}

//===----------------------------------------------------------------------===//
//                             BinaryOperator Class
//===----------------------------------------------------------------------===//

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}


BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}
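
// Usage sketch (illustrative only; assumes two i32 Values A and B and an
// insertion point Instruction *IP):
//
//   BinaryOperator *Sum =
//       BinaryOperator::Create(Instruction::Add, A, B, "sum", IP);
//
// The operand types must already match; AssertOK() then checks that the
// chosen opcode is legal for that type.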

BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
  return Res;
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
                                             BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}
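
// Note: there is no dedicated 'not' instruction in LLVM IR, so CreateNot
// lowers to an xor with the all-ones constant. Illustrative IR for an i8
// operand %x:
//
//   %x.not = xor i8 %x, -1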

// Exchange the two operands to this instruction. This is safe to use on any
// binary instruction and does not modify the semantics of the instruction.
// If the instruction is not commutative, nothing is changed and true is
// returned to signal failure.
bool BinaryOperator::swapOperands() {
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  return false;
}

//===----------------------------------------------------------------------===//
//                            FPMathOperator Class
//===----------------------------------------------------------------------===//

float FPMathOperator::getFPAccuracy() const {
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
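
// Illustrative only: for an instruction carrying fpmath metadata such as
//
//   %d = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.500000e+00}
//
// getFPAccuracy() returns 2.5 (the allowed error in ULPs); with no !fpmath
// attached it returns 0.0, meaning an exact result is required.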

//===----------------------------------------------------------------------===//
//                               CastInst Class
//===----------------------------------------------------------------------===//

// Just determine if this cast only deals with integral->integral conversion.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}

bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)
    return false;

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();
  if (SrcTy == DstTy)
    return true;

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false; // Other types have no identity values
}

/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast.  For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
    default: llvm_unreachable("Invalid CastOp");
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::AddrSpaceCast:
      // TODO: Target information may give a more accurate answer here.
      return false;
    case Instruction::BitCast:
      return true;  // BitCast never modifies bits.
    case Instruction::PtrToInt:
      return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
             DestTy->getScalarSizeInBits();
    case Instruction::IntToPtr:
      return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
             SrcTy->getScalarSizeInBits();
  }
}

bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
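
// Illustrative only (assumes a module whose DataLayout uses 64-bit
// pointers): for a 'ptrtoint i8* %p to i64' instruction PI,
// PI->isNoopCast(DL) is true because the destination integer is exactly
// pointer-sized, whereas 'ptrtoint i8* %p to i32' would report false.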

/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// *  %F = firstOpcode SrcTy %x to MidTy
/// *  %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// *  %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a       FirstClass    n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 6:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
        return secondOp;
      return 0;
    case 7: {
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast,    if the SrcTy and DstTy are the same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)
        return firstOp;
      return secondOp;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;
      return 0;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to the
      // semantic change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
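
// Worked example (illustrative only): for firstOp = ZExt (i16 -> i32) and
// secondOp = ZExt (i32 -> i64), the table selects case 1, so the pair can be
// replaced by a single 'zext i16 %x to i64' using the first opcode. For ZExt
// followed by SExt, the table selects case 9 and the result is a plain ZExt,
// since sign extension after zero extension never sets the new high bits.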

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        const Twine &Name,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         const Twine &Name,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<FixedVectorType>(Ty)->getNumElements() ==
          cast<FixedVectorType>(S->getType())->getNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<FixedVectorType>(Ty)->getNumElements() ==
          cast<FixedVectorType>(S->getType())->getNumElements()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Value *S, Type *Ty,
  const Twine &Name,
  Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           const Twine &Name,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      Instruction *InsertBefore) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid integer cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertBefore);
}
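
// Illustrative only (Int32Ty and IP below are assumed, hypothetical names
// for an i32 IntegerType and an insertion point): widening an i8 value V
// with isSigned = true picks SExt, widening with isSigned = false picks
// ZExt, narrowing picks Trunc, and a same-width request degenerates to a
// BitCast:
//
//   CastInst *Ext = CastInst::CreateIntegerCast(V, Int32Ty,
//                                               /*isSigned=*/true, "ext", IP);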

CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
                                      bool isSigned, const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::Trunc :
      (isSigned ? Instruction::SExt : Instruction::ZExt)));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 Instruction *InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
}

CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 const Twine &Name,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
         "Invalid cast");
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
}

// Check whether it is valid to call getCastOpcode for these types.
// This routine must be kept in sync with getCastOpcode.
bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (cast<FixedVectorType>(SrcVecTy)->getNumElements() ==
          cast<FixedVectorType>(DestVecTy)->getNumElements()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {             // Casting to integral
    if (SrcTy->isIntegerTy())              // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())        // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())               // Casting from vector
      return DestBits == SrcBits;
                                           // Casting from something else
    return SrcTy->isPointerTy();
  }
  if (DestTy->isFloatingPointTy()) {       // Casting to floating pt
    if (SrcTy->isIntegerTy())              // Casting from integral
      return true;
    if (SrcTy->isFloatingPointTy())        // Casting from floating pt
      return true;
    if (SrcTy->isVectorTy())               // Casting from vector
      return DestBits == SrcBits;
                                           // Casting from something else
    return false;
  }
  if (DestTy->isVectorTy())                // Casting to vector
    return DestBits == SrcBits;
  if (DestTy->isPointerTy()) {             // Casting to pointer
    if (SrcTy->isPointerTy())              // Casting from pointer
      return true;
    return SrcTy->isIntegerTy();           // Casting from integral
  }
  if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy())
      return DestBits == SrcBits;          // 64-bit vector to MMX
    return false;
  }                                        // Casting to something else
  return false;
}

bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
    return false;

  if (SrcTy == DestTy)
    return true;

  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }
    }
  }

  if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
    if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
      return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
    }
  }

  TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Could still have vectors of pointers if the number of elements doesn't
  // match
  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
    return false;

  if (SrcBits != DestBits)
    return false;

  if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
    return false;

  return true;
}

bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
                                          const DataLayout &DL) {
  // ptrtoint and inttoptr are not allowed on non-integral pointers
  if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
    if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));
  if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
    if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
      return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
              !DL.isNonIntegralPointerType(PtrTy));

  return isBitCastable(SrcTy, DestTy);
}

// Provide a way to get a "cast" where the cast opcode is inferred from the
// types and size of the operand. This, basically, is a parallel of the
// logic in the castIsValid function below.  This axiom should hold:
//   castIsValid( getCastOpcode(Val, Ty), Val, Ty)
// should not assert in castIsValid. In other words, this produces a "correct"
// casting opcode for the arguments passed to it.
// This routine must be kept in sync with isCastable.
Instruction::CastOps
CastInst::getCastOpcode(
  const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
  Type *SrcTy = Src->getType();

  assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
         "Only first class types are castable!");

  if (SrcTy == DestTy)
    return BitCast;

  // FIXME: Check address space sizes here
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
        // An element by element cast. Find the appropriate opcode based on the
        // element types.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();
      }

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {                    // Casting to integral
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (DestBits < SrcBits)
        return Trunc;                             // int -> smaller int
      else if (DestBits > SrcBits) {              // it's an extension
        if (SrcIsSigned)
          return SExt;                            // signed -> SEXT
        else
          return ZExt;                            // unsigned -> ZEXT
      } else {
        return BitCast;                           // Same size, No-op cast
      }
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestIsSigned)
        return FPToSI;                            // FP -> sint
      else
        return FPToUI;                            // FP -> uint
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to integer of different width");
      return BitCast;                             // Same size, no-op cast
    } else {
      assert(SrcTy->isPointerTy() &&
             "Casting from a value that is not first-class type");
      return PtrToInt;                            // ptr -> int
    }
  } else if (DestTy->isFloatingPointTy()) {       // Casting to floating pt
    if (SrcTy->isIntegerTy()) {                   // Casting from integral
      if (SrcIsSigned)
        return SIToFP;                            // sint -> FP
      else
        return UIToFP;                            // uint -> FP
    } else if (SrcTy->isFloatingPointTy()) {      // Casting from floating pt
      if (DestBits < SrcBits) {
        return FPTrunc;                           // FP -> smaller FP
      } else if (DestBits > SrcBits) {
        return FPExt;                             // FP -> larger FP
      } else {
        return BitCast;                           // same size, no-op cast
      }
    } else if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits &&
             "Casting vector to floating point of different width");
      return BitCast;                             // same size, no-op cast
    }
    llvm_unreachable("Casting pointer or non-first class to float");
  } else if (DestTy->isVectorTy()) {
    assert(DestBits == SrcBits &&
           "Illegal cast to vector (wrong type or size)");
    return BitCast;
  } else if (DestTy->isPointerTy()) {
    if (SrcTy->isPointerTy()) {
      if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
        return AddrSpaceCast;
      return BitCast;                             // ptr -> ptr
    } else if (SrcTy->isIntegerTy()) {
      return IntToPtr;                            // int -> ptr
    }
    llvm_unreachable("Casting pointer to other than pointer or int");
  } else if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy()) {
      assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
      return BitCast;                             // 64-bit vector to MMX
    }
    llvm_unreachable("Illegal cast to X86_MMX");
  }
  llvm_unreachable("Casting to type that is not first-class");
}
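
// Illustrative only (Int64Ty below is an assumed name for an i64 type): for
// an i16 source value Src and an i64 destination,
//
//   CastInst::getCastOpcode(Src, /*SrcIsSigned=*/true, Int64Ty,
//                           /*DestIsSigned=*/false)
//
// returns SExt, since the source signedness decides how an integer is
// widened; for a float source and an i32 destination with
// DestIsSigned = true, it returns FPToSI instead.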

//===----------------------------------------------------------------------===//
//                          CastInst SubClass Constructors
//===----------------------------------------------------------------------===//

/// Check that the construction parameters for a CastInst are correct. This
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
bool
CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {
  if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
      SrcTy->isAggregateType() || DstTy->isAggregateType())
    return false;

  // Get the size of the types in bits, and whether we are dealing
  // with vector types, we'll need this later.
  bool SrcIsVec = isa<VectorType>(SrcTy);
  bool DstIsVec = isa<VectorType>(DstTy);
  unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();

  // If these are vector types, get the lengths of the vectors (using zero for
  // scalar types means that checking that vector lengths match also checks
  // that scalars are not being converted to vectors or vectors to scalars).
  ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
                                : ElementCount::getFixed(0);
  ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
                                : ElementCount::getFixed(0);

  // Switch on the opcode provided
  switch (op) {
  default: return false; // This is an input error
  case Instruction::Trunc:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::ZExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::SExt:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::FPTrunc:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
  case Instruction::FPExt:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
  case Instruction::UIToFP:
  case Instruction::SIToFP:
    return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
           SrcEC == DstEC;
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
           SrcEC == DstEC;
  case Instruction::PtrToInt:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
  case Instruction::IntToPtr:
    if (SrcEC != DstEC)
      return false;
    return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
  case Instruction::BitCast: {
    PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
    PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());

    // BitCast implies a no-op cast of type only. No bits change.
    // However, you can't cast pointers to anything but pointers.
    if (!SrcPtrTy != !DstPtrTy)
      return false;

    // For non-pointer cases, the cast is okay if the source and destination
    // bit widths are identical.
    if (!SrcPtrTy)
      return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();

    // If both are pointers then the address spaces must match.
    if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
      return false;

    // A vector of pointers must have the same number of elements.
    if (SrcIsVec && DstIsVec)
      return SrcEC == DstEC;
    if (SrcIsVec)
      return SrcEC == ElementCount::getFixed(1);
    if (DstIsVec)
      return DstEC == ElementCount::getFixed(1);
|
2014-01-22 19:21:33 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
case Instruction::AddrSpaceCast: {
|
|
|
|
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
|
|
|
|
if (!SrcPtrTy)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
|
|
|
|
if (!DstPtrTy)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
|
|
|
|
return false;
|
|
|
|
|
2020-03-24 22:38:12 +00:00
|
|
|
return SrcEC == DstEC;
|
2014-01-22 19:21:33 +00:00
|
|
|
}
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
}
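// Illustrative sketch (not part of this file): castIsValid lets a caller
// reject an ill-formed cast before constructing the instruction, e.g.
//   if (CastInst::castIsValid(Instruction::BitCast, SrcTy, DstTy))
//     V = CastInst::Create(Instruction::BitCast, V, DstTy, "bc", InsertPt);
// SrcTy, DstTy, V and InsertPt are placeholder names.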
|
|
|
|
|
|
|
|
TruncInst::TruncInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2006-11-27 01:05:10 +00:00
|
|
|
) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TruncInst::TruncInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ZExtInst::ZExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ZExtInst::ZExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
SExtInst::SExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, SExt, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
2006-12-02 02:22:01 +00:00
|
|
|
SExtInst::SExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPTruncInst::FPTruncInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPTruncInst::FPTruncInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPExtInst::FPExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPExtInst::FPExtInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
UIToFPInst::UIToFPInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
UIToFPInst::UIToFPInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
SIToFPInst::SIToFPInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
SIToFPInst::SIToFPInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPToUIInst::FPToUIInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPToUIInst::FPToUIInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPToSIInst::FPToSIInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
FPToSIInst::FPToSIInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
PtrToIntInst::PtrToIntInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
PtrToIntInst::PtrToIntInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
IntToPtrInst::IntToPtrInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
IntToPtrInst::IntToPtrInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
BitCastInst::BitCastInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
BitCastInst::BitCastInst(
|
2011-07-18 04:54:35 +00:00
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
2018-07-30 19:41:25 +00:00
|
|
|
) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
|
2007-01-17 02:46:11 +00:00
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
|
2006-11-27 01:05:10 +00:00
|
|
|
}
|
2004-07-29 12:33:25 +00:00
|
|
|
|
2013-11-15 01:34:59 +00:00
|
|
|
AddrSpaceCastInst::AddrSpaceCastInst(
|
|
|
|
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
|
|
|
|
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
|
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
|
|
|
|
}
|
|
|
|
|
|
|
|
AddrSpaceCastInst::AddrSpaceCastInst(
|
|
|
|
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
|
|
|
|
) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
|
|
|
|
assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
|
|
|
|
}
|
|
|
|
|
2006-11-20 01:22:35 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CmpInst Classes
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
|
2018-11-07 00:00:42 +00:00
|
|
|
Value *RHS, const Twine &Name, Instruction *InsertBefore,
|
|
|
|
Instruction *FlagsSource)
|
2008-05-12 20:11:05 +00:00
|
|
|
: Instruction(ty, op,
|
2008-05-10 08:32:32 +00:00
|
|
|
OperandTraits<CmpInst>::op_begin(this),
|
|
|
|
OperandTraits<CmpInst>::operands(this),
|
|
|
|
InsertBefore) {
|
2018-11-07 00:00:42 +00:00
|
|
|
Op<0>() = LHS;
|
|
|
|
Op<1>() = RHS;
|
2009-12-29 02:14:09 +00:00
|
|
|
setPredicate((Predicate)predicate);
|
2007-04-11 13:04:48 +00:00
|
|
|
setName(Name);
|
2018-11-07 00:00:42 +00:00
|
|
|
if (FlagsSource)
|
|
|
|
copyIRFlags(FlagsSource);
|
2008-05-12 19:01:56 +00:00
|
|
|
}
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
|
|
|
|
Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
|
2008-05-12 20:11:05 +00:00
|
|
|
: Instruction(ty, op,
|
2008-05-10 08:32:32 +00:00
|
|
|
OperandTraits<CmpInst>::op_begin(this),
|
|
|
|
OperandTraits<CmpInst>::operands(this),
|
|
|
|
InsertAtEnd) {
|
2008-05-26 21:33:52 +00:00
|
|
|
Op<0>() = LHS;
|
|
|
|
Op<1>() = RHS;
|
2009-12-29 02:14:09 +00:00
|
|
|
setPredicate((Predicate)predicate);
|
2007-04-11 13:04:48 +00:00
|
|
|
setName(Name);
|
2006-11-20 01:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
CmpInst *
|
2015-12-15 06:11:33 +00:00
|
|
|
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
|
2009-07-25 04:41:11 +00:00
|
|
|
const Twine &Name, Instruction *InsertBefore) {
|
2006-11-20 01:22:35 +00:00
|
|
|
if (Op == Instruction::ICmp) {
|
2009-07-09 23:48:35 +00:00
|
|
|
if (InsertBefore)
|
|
|
|
return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
|
|
|
|
S1, S2, Name);
|
|
|
|
else
|
2009-08-25 23:17:54 +00:00
|
|
|
return new ICmpInst(CmpInst::Predicate(predicate),
|
2009-07-09 23:48:35 +00:00
|
|
|
S1, S2, Name);
|
2008-05-12 19:01:56 +00:00
|
|
|
}
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2009-07-09 23:48:35 +00:00
|
|
|
if (InsertBefore)
|
|
|
|
return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
|
|
|
|
S1, S2, Name);
|
|
|
|
else
|
2009-08-25 23:17:54 +00:00
|
|
|
return new FCmpInst(CmpInst::Predicate(predicate),
|
2009-07-09 23:48:35 +00:00
|
|
|
S1, S2, Name);
|
2006-11-20 01:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
CmpInst *
|
2015-12-15 06:11:33 +00:00
|
|
|
CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
|
2009-07-25 04:41:11 +00:00
|
|
|
const Twine &Name, BasicBlock *InsertAtEnd) {
|
2006-11-20 01:22:35 +00:00
|
|
|
if (Op == Instruction::ICmp) {
|
2009-07-09 23:48:35 +00:00
|
|
|
return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
|
|
|
|
S1, S2, Name);
|
2008-05-12 19:01:56 +00:00
|
|
|
}
|
2009-07-09 23:48:35 +00:00
|
|
|
return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
|
|
|
|
S1, S2, Name);
|
2006-11-20 01:22:35 +00:00
|
|
|
}
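// Illustrative sketch (not part of this file): both Create overloads dispatch
// to the concrete subclasses, so for example
//   CmpInst *C = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
//                                A, B, "cmp", InsertPt);
// yields an ICmpInst. A, B and InsertPt are placeholder names.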
|
|
|
|
|
|
|
|
void CmpInst::swapOperands() {
|
|
|
|
if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
|
|
|
|
IC->swapOperands();
|
|
|
|
else
|
|
|
|
cast<FCmpInst>(this)->swapOperands();
|
|
|
|
}
|
|
|
|
|
2011-01-04 12:52:29 +00:00
|
|
|
bool CmpInst::isCommutative() const {
|
|
|
|
if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
|
2006-11-20 01:22:35 +00:00
|
|
|
return IC->isCommutative();
|
|
|
|
return cast<FCmpInst>(this)->isCommutative();
|
|
|
|
}
|
|
|
|
|
2020-11-06 10:36:30 +03:00
|
|
|
bool CmpInst::isEquality(Predicate P) {
|
|
|
|
if (ICmpInst::isIntPredicate(P))
|
|
|
|
return ICmpInst::isEquality(P);
|
|
|
|
if (FCmpInst::isFPPredicate(P))
|
|
|
|
return FCmpInst::isEquality(P);
|
|
|
|
llvm_unreachable("Unsupported predicate kind");
|
2006-11-20 01:22:35 +00:00
|
|
|
}
|
|
|
|
|
2008-05-31 02:47:54 +00:00
|
|
|
CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
|
2006-11-20 01:22:35 +00:00
|
|
|
switch (pred) {
|
2012-02-05 22:14:15 +00:00
|
|
|
default: llvm_unreachable("Unknown cmp predicate!");
|
2006-11-20 01:22:35 +00:00
|
|
|
case ICMP_EQ: return ICMP_NE;
|
|
|
|
case ICMP_NE: return ICMP_EQ;
|
|
|
|
case ICMP_UGT: return ICMP_ULE;
|
|
|
|
case ICMP_ULT: return ICMP_UGE;
|
|
|
|
case ICMP_UGE: return ICMP_ULT;
|
|
|
|
case ICMP_ULE: return ICMP_UGT;
|
|
|
|
case ICMP_SGT: return ICMP_SLE;
|
|
|
|
case ICMP_SLT: return ICMP_SGE;
|
|
|
|
case ICMP_SGE: return ICMP_SLT;
|
|
|
|
case ICMP_SLE: return ICMP_SGT;
|
|
|
|
|
2008-05-31 02:47:54 +00:00
|
|
|
case FCMP_OEQ: return FCMP_UNE;
|
|
|
|
case FCMP_ONE: return FCMP_UEQ;
|
|
|
|
case FCMP_OGT: return FCMP_ULE;
|
|
|
|
case FCMP_OLT: return FCMP_UGE;
|
|
|
|
case FCMP_OGE: return FCMP_ULT;
|
|
|
|
case FCMP_OLE: return FCMP_UGT;
|
|
|
|
case FCMP_UEQ: return FCMP_ONE;
|
|
|
|
case FCMP_UNE: return FCMP_OEQ;
|
|
|
|
case FCMP_UGT: return FCMP_OLE;
|
|
|
|
case FCMP_ULT: return FCMP_OGE;
|
|
|
|
case FCMP_UGE: return FCMP_OLT;
|
|
|
|
case FCMP_ULE: return FCMP_OGT;
|
|
|
|
case FCMP_ORD: return FCMP_UNO;
|
|
|
|
case FCMP_UNO: return FCMP_ORD;
|
|
|
|
case FCMP_TRUE: return FCMP_FALSE;
|
|
|
|
case FCMP_FALSE: return FCMP_TRUE;
|
2006-11-20 01:22:35 +00:00
|
|
|
}
|
|
|
|
}
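// For example, getInversePredicate(ICMP_ULT) is ICMP_UGE, and for floating
// point the inverse also flips orderedness: getInversePredicate(FCMP_OGT) is
// FCMP_ULE, because the negation of "ordered and greater" must include the
// unordered case.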
|
|
|
|
|
2016-08-17 20:25:25 +00:00
|
|
|
StringRef CmpInst::getPredicateName(Predicate Pred) {
|
|
|
|
switch (Pred) {
|
|
|
|
default: return "unknown";
|
|
|
|
case FCmpInst::FCMP_FALSE: return "false";
|
|
|
|
case FCmpInst::FCMP_OEQ: return "oeq";
|
|
|
|
case FCmpInst::FCMP_OGT: return "ogt";
|
|
|
|
case FCmpInst::FCMP_OGE: return "oge";
|
|
|
|
case FCmpInst::FCMP_OLT: return "olt";
|
|
|
|
case FCmpInst::FCMP_OLE: return "ole";
|
|
|
|
case FCmpInst::FCMP_ONE: return "one";
|
|
|
|
case FCmpInst::FCMP_ORD: return "ord";
|
|
|
|
case FCmpInst::FCMP_UNO: return "uno";
|
|
|
|
case FCmpInst::FCMP_UEQ: return "ueq";
|
|
|
|
case FCmpInst::FCMP_UGT: return "ugt";
|
|
|
|
case FCmpInst::FCMP_UGE: return "uge";
|
|
|
|
case FCmpInst::FCMP_ULT: return "ult";
|
|
|
|
case FCmpInst::FCMP_ULE: return "ule";
|
|
|
|
case FCmpInst::FCMP_UNE: return "une";
|
|
|
|
case FCmpInst::FCMP_TRUE: return "true";
|
|
|
|
case ICmpInst::ICMP_EQ: return "eq";
|
|
|
|
case ICmpInst::ICMP_NE: return "ne";
|
|
|
|
case ICmpInst::ICMP_SGT: return "sgt";
|
|
|
|
case ICmpInst::ICMP_SGE: return "sge";
|
|
|
|
case ICmpInst::ICMP_SLT: return "slt";
|
|
|
|
case ICmpInst::ICMP_SLE: return "sle";
|
|
|
|
case ICmpInst::ICMP_UGT: return "ugt";
|
|
|
|
case ICmpInst::ICMP_UGE: return "uge";
|
|
|
|
case ICmpInst::ICMP_ULT: return "ult";
|
|
|
|
case ICmpInst::ICMP_ULE: return "ule";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-12-23 06:05:41 +00:00
|
|
|
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
|
|
|
|
switch (pred) {
|
2012-02-05 22:14:15 +00:00
|
|
|
default: llvm_unreachable("Unknown icmp predicate!");
|
2018-07-30 19:41:25 +00:00
|
|
|
case ICMP_EQ: case ICMP_NE:
|
|
|
|
case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
|
2006-12-23 06:05:41 +00:00
|
|
|
return pred;
|
|
|
|
case ICMP_UGT: return ICMP_SGT;
|
|
|
|
case ICMP_ULT: return ICMP_SLT;
|
|
|
|
case ICMP_UGE: return ICMP_SGE;
|
|
|
|
case ICMP_ULE: return ICMP_SLE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-01-28 03:48:02 +00:00
|
|
|
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
|
|
|
|
switch (pred) {
|
2012-02-05 22:14:15 +00:00
|
|
|
default: llvm_unreachable("Unknown icmp predicate!");
|
2018-07-30 19:41:25 +00:00
|
|
|
case ICMP_EQ: case ICMP_NE:
|
|
|
|
case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
|
2008-01-28 03:48:02 +00:00
|
|
|
return pred;
|
|
|
|
case ICMP_SGT: return ICMP_UGT;
|
|
|
|
case ICMP_SLT: return ICMP_ULT;
|
|
|
|
case ICMP_SGE: return ICMP_UGE;
|
|
|
|
case ICMP_SLE: return ICMP_ULE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-09 07:59:07 +00:00
|
|
|
CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
|
|
|
|
switch (pred) {
|
|
|
|
default: llvm_unreachable("Unknown or unsupported cmp predicate!");
|
|
|
|
case ICMP_SGT: return ICMP_SGE;
|
|
|
|
case ICMP_SLT: return ICMP_SLE;
|
|
|
|
case ICMP_SGE: return ICMP_SGT;
|
|
|
|
case ICMP_SLE: return ICMP_SLT;
|
|
|
|
case ICMP_UGT: return ICMP_UGE;
|
|
|
|
case ICMP_ULT: return ICMP_ULE;
|
|
|
|
case ICMP_UGE: return ICMP_UGT;
|
|
|
|
case ICMP_ULE: return ICMP_ULT;
|
|
|
|
|
|
|
|
case FCMP_OGT: return FCMP_OGE;
|
|
|
|
case FCMP_OLT: return FCMP_OLE;
|
|
|
|
case FCMP_OGE: return FCMP_OGT;
|
|
|
|
case FCMP_OLE: return FCMP_OLT;
|
|
|
|
case FCMP_UGT: return FCMP_UGE;
|
|
|
|
case FCMP_ULT: return FCMP_ULE;
|
|
|
|
case FCMP_UGE: return FCMP_UGT;
|
|
|
|
case FCMP_ULE: return FCMP_ULT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-31 02:47:54 +00:00
|
|
|
CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
|
2006-11-20 01:22:35 +00:00
|
|
|
switch (pred) {
|
2012-02-05 22:14:15 +00:00
|
|
|
default: llvm_unreachable("Unknown cmp predicate!");
|
2008-05-31 02:47:54 +00:00
|
|
|
case ICMP_EQ: case ICMP_NE:
|
|
|
|
return pred;
|
|
|
|
case ICMP_SGT: return ICMP_SLT;
|
|
|
|
case ICMP_SLT: return ICMP_SGT;
|
|
|
|
case ICMP_SGE: return ICMP_SLE;
|
|
|
|
case ICMP_SLE: return ICMP_SGE;
|
|
|
|
case ICMP_UGT: return ICMP_ULT;
|
|
|
|
case ICMP_ULT: return ICMP_UGT;
|
|
|
|
case ICMP_UGE: return ICMP_ULE;
|
|
|
|
case ICMP_ULE: return ICMP_UGE;
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2006-11-20 01:22:35 +00:00
|
|
|
case FCMP_FALSE: case FCMP_TRUE:
|
|
|
|
case FCMP_OEQ: case FCMP_ONE:
|
|
|
|
case FCMP_UEQ: case FCMP_UNE:
|
|
|
|
case FCMP_ORD: case FCMP_UNO:
|
|
|
|
return pred;
|
|
|
|
case FCMP_OGT: return FCMP_OLT;
|
|
|
|
case FCMP_OLT: return FCMP_OGT;
|
|
|
|
case FCMP_OGE: return FCMP_OLE;
|
|
|
|
case FCMP_OLE: return FCMP_OGE;
|
|
|
|
case FCMP_UGT: return FCMP_ULT;
|
|
|
|
case FCMP_ULT: return FCMP_UGT;
|
|
|
|
case FCMP_UGE: return FCMP_ULE;
|
|
|
|
case FCMP_ULE: return FCMP_UGE;
|
|
|
|
}
|
|
|
|
}
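// For example, getSwappedPredicate(ICMP_SGT) is ICMP_SLT: "icmp sgt %a, %b"
// computes the same value as "icmp slt %b, %a" once the operands are swapped.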
|
2015-10-22 19:57:34 +00:00
|
|
|
|
2018-02-07 11:16:29 +00:00
|
|
|
CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
|
|
|
|
switch (pred) {
|
|
|
|
case ICMP_SGT: return ICMP_SGE;
|
|
|
|
case ICMP_SLT: return ICMP_SLE;
|
|
|
|
case ICMP_UGT: return ICMP_UGE;
|
|
|
|
case ICMP_ULT: return ICMP_ULE;
|
|
|
|
case FCMP_OGT: return FCMP_OGE;
|
|
|
|
case FCMP_OLT: return FCMP_OLE;
|
|
|
|
case FCMP_UGT: return FCMP_UGE;
|
|
|
|
case FCMP_ULT: return FCMP_ULE;
|
|
|
|
default: return pred;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-22 19:57:34 +00:00
|
|
|
CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
|
2020-11-06 09:52:54 +03:00
|
|
|
assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
|
2015-10-22 19:57:34 +00:00
|
|
|
|
|
|
|
switch (pred) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown predicate!");
|
|
|
|
case CmpInst::ICMP_ULT:
|
|
|
|
return CmpInst::ICMP_SLT;
|
|
|
|
case CmpInst::ICMP_ULE:
|
|
|
|
return CmpInst::ICMP_SLE;
|
|
|
|
case CmpInst::ICMP_UGT:
|
|
|
|
return CmpInst::ICMP_SGT;
|
|
|
|
case CmpInst::ICMP_UGE:
|
|
|
|
return CmpInst::ICMP_SGE;
|
|
|
|
}
|
|
|
|
}
|
2006-11-20 01:22:35 +00:00
|
|
|
|
2020-11-06 09:52:54 +03:00
|
|
|
CmpInst::Predicate CmpInst::getUnsignedPredicate(Predicate pred) {
|
|
|
|
assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
|
|
|
|
|
|
|
|
switch (pred) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown predicate!");
|
|
|
|
case CmpInst::ICMP_SLT:
|
|
|
|
return CmpInst::ICMP_ULT;
|
|
|
|
case CmpInst::ICMP_SLE:
|
|
|
|
return CmpInst::ICMP_ULE;
|
|
|
|
case CmpInst::ICMP_SGT:
|
|
|
|
return CmpInst::ICMP_UGT;
|
|
|
|
case CmpInst::ICMP_SGE:
|
|
|
|
return CmpInst::ICMP_UGE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isUnsigned(Predicate predicate) {
|
2006-12-23 06:05:41 +00:00
|
|
|
switch (predicate) {
|
|
|
|
default: return false;
|
2018-07-30 19:41:25 +00:00
|
|
|
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
|
2006-12-23 06:05:41 +00:00
|
|
|
case ICmpInst::ICMP_UGE: return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isSigned(Predicate predicate) {
|
2006-12-23 06:05:41 +00:00
|
|
|
switch (predicate) {
|
|
|
|
default: return false;
|
2018-07-30 19:41:25 +00:00
|
|
|
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
|
2006-12-23 06:05:41 +00:00
|
|
|
case ICmpInst::ICMP_SGE: return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-06 11:14:03 +03:00
|
|
|
CmpInst::Predicate CmpInst::getFlippedSignednessPredicate(Predicate pred) {
|
|
|
|
assert(CmpInst::isRelational(pred) &&
|
|
|
|
"Call only with non-equality predicates!");
|
|
|
|
|
|
|
|
if (isSigned(pred))
|
|
|
|
return getUnsignedPredicate(pred);
|
|
|
|
if (isUnsigned(pred))
|
|
|
|
return getSignedPredicate(pred);
|
|
|
|
|
|
|
|
llvm_unreachable("Unknown predicate!");
|
|
|
|
}
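// For example, getFlippedSignednessPredicate(ICMP_SLT) is ICMP_ULT. The
// preceding assert restricts this to relational predicates; ICMP_EQ and
// ICMP_NE have no signedness to flip.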
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isOrdered(Predicate predicate) {
|
2006-12-23 06:05:41 +00:00
|
|
|
switch (predicate) {
|
|
|
|
default: return false;
|
2018-07-30 19:41:25 +00:00
|
|
|
case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
|
|
|
|
case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
|
2006-12-23 06:05:41 +00:00
|
|
|
case FCmpInst::FCMP_ORD: return true;
|
|
|
|
}
|
|
|
|
}
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isUnordered(Predicate predicate) {
|
2006-12-23 06:05:41 +00:00
|
|
|
switch (predicate) {
|
|
|
|
default: return false;
|
2018-07-30 19:41:25 +00:00
|
|
|
case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
|
|
|
|
case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
|
2006-12-23 06:05:41 +00:00
|
|
|
case FCmpInst::FCMP_UNO: return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isTrueWhenEqual(Predicate predicate) {
|
2009-10-25 03:50:03 +00:00
|
|
|
switch(predicate) {
|
|
|
|
default: return false;
|
|
|
|
case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
|
|
|
|
case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-15 06:11:33 +00:00
|
|
|
bool CmpInst::isFalseWhenEqual(Predicate predicate) {
|
2009-10-25 03:50:03 +00:00
|
|
|
switch(predicate) {
|
|
|
|
case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
|
|
|
|
case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
|
|
|
|
default: return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-21 16:18:02 +00:00
|
|
|
bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
|
2016-04-21 14:04:54 +00:00
|
|
|
// If the predicates match, then we know the first condition implies the
|
|
|
|
// second is true.
|
|
|
|
if (Pred1 == Pred2)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
switch (Pred1) {
|
|
|
|
default:
|
|
|
|
break;
|
2016-04-22 17:57:34 +00:00
|
|
|
case ICMP_EQ:
|
2016-04-25 13:25:14 +00:00
|
|
|
// A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
|
2016-04-22 17:57:34 +00:00
|
|
|
return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
|
|
|
|
Pred2 == ICMP_SLE;
|
2016-04-22 17:14:12 +00:00
|
|
|
case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
|
|
|
|
return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
|
|
|
|
case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
|
|
|
|
return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
|
|
|
|
case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
|
|
|
|
return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
|
|
|
|
case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
|
|
|
|
return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
|
2016-04-21 14:04:54 +00:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-04-21 16:18:02 +00:00
|
|
|
bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
|
2016-04-22 17:57:34 +00:00
|
|
|
return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
|
2016-04-21 14:04:54 +00:00
|
|
|
}
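// For example, with identical operands isImpliedTrueByMatchingCmp(ICMP_UGT,
// ICMP_UGE) is true (A >u B implies A >=u B), and
// isImpliedFalseByMatchingCmp(ICMP_EQ, ICMP_NE) is true (A == B implies that
// A != B is false).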
|
2009-10-25 03:50:03 +00:00
|
|
|
|
2004-07-29 12:33:25 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SwitchInst Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2010-11-17 05:41:46 +00:00
|
|
|
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
|
|
|
|
assert(Value && Default && NumReserved);
|
|
|
|
ReservedSpace = NumReserved;
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(2);
|
2015-06-10 22:38:46 +00:00
|
|
|
allocHungoffUses(ReservedSpace);
|
2005-01-29 00:35:16 +00:00
|
|
|
|
2015-05-21 22:48:54 +00:00
|
|
|
Op<0>() = Value;
|
|
|
|
Op<1>() = Default;
|
2004-07-29 12:33:25 +00:00
|
|
|
}
|
|
|
|
|
2007-02-24 00:55:48 +00:00
|
|
|
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
|
|
|
|
/// switch on and a default destination. The number of additional cases can
|
|
|
|
/// be specified here to make memory allocation more efficient. This
|
|
|
|
/// constructor can also autoinsert before another instruction.
|
|
|
|
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
|
|
|
|
Instruction *InsertBefore)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
|
|
|
|
nullptr, 0, InsertBefore) {
|
2010-11-17 05:41:46 +00:00
|
|
|
init(Value, Default, 2+NumCases*2);
|
2007-02-24 00:55:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// SwitchInst ctor - Create a new switch instruction, specifying a value to
|
|
|
|
/// switch on and a default destination. The number of additional cases can
|
|
|
|
/// be specified here to make memory allocation more efficient. This
|
|
|
|
/// constructor also autoinserts at the end of the specified BasicBlock.
|
|
|
|
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
|
|
|
|
BasicBlock *InsertAtEnd)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
|
|
|
|
nullptr, 0, InsertAtEnd) {
|
2010-11-17 05:41:46 +00:00
|
|
|
init(Value, Default, 2+NumCases*2);
|
2007-02-24 00:55:48 +00:00
|
|
|
}
|
|
|
|
|
2005-04-21 23:48:37 +00:00
|
|
|
SwitchInst::SwitchInst(const SwitchInst &SI)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
|
2010-11-17 05:41:46 +00:00
|
|
|
init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(SI.getNumOperands());
|
2015-06-12 17:48:05 +00:00
|
|
|
Use *OL = getOperandList();
|
|
|
|
const Use *InOL = SI.getOperandList();
|
2010-11-17 05:41:46 +00:00
|
|
|
for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
|
2008-05-26 21:33:52 +00:00
|
|
|
OL[i] = InOL[i];
|
|
|
|
OL[i+1] = InOL[i+1];
|
2004-07-29 12:33:25 +00:00
|
|
|
}
|
2009-08-25 22:11:20 +00:00
|
|
|
SubclassOptionalData = SI.SubclassOptionalData;
|
2004-07-29 12:33:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// addCase - Add an entry to the switch instruction...
|
|
|
|
///
|
2005-02-24 05:32:09 +00:00
|
|
|
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
|
2015-06-12 17:48:10 +00:00
|
|
|
unsigned NewCaseIdx = getNumCases();
|
|
|
|
unsigned OpNo = getNumOperands();
|
2005-01-29 00:35:16 +00:00
|
|
|
if (OpNo+2 > ReservedSpace)
|
2011-04-01 08:00:58 +00:00
|
|
|
growOperands(); // Get more space!
|
2005-01-29 00:35:16 +00:00
|
|
|
// Initialize some new operands.
|
2005-01-29 01:05:12 +00:00
|
|
|
assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(OpNo+2);
|
2017-04-12 07:27:28 +00:00
|
|
|
CaseHandle Case(this, NewCaseIdx);
|
2013-09-09 19:14:35 +00:00
|
|
|
Case.setValue(OnVal);
|
2012-03-08 07:06:20 +00:00
|
|
|
Case.setSuccessor(Dest);
|
2004-07-29 12:33:25 +00:00
|
|
|
}
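// Illustrative sketch (not part of this file): a typical construction
// sequence reserves the cases up front and then adds them, e.g.
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/2, BB);
//   SI->addCase(ConstantInt::get(Int32Ty, 0), CaseBB0);
//   SI->addCase(ConstantInt::get(Int32Ty, 1), CaseBB1);
// Cond, DefaultBB, BB, Int32Ty (an IntegerType) and the CaseBBs are
// placeholder names.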
|
|
|
|
|
2012-02-01 07:49:51 +00:00
|
|
|
/// removeCase - This method removes the specified case and its successor
|
|
|
|
/// from the switch instruction.
|
2017-04-12 07:27:28 +00:00
|
|
|
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
|
|
|
|
unsigned idx = I->getCaseIndex();
|
|
|
|
|
2012-02-01 07:49:51 +00:00
|
|
|
assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
|
2005-01-29 00:35:16 +00:00
|
|
|
|
|
|
|
unsigned NumOps = getNumOperands();
|
2015-06-12 17:48:05 +00:00
|
|
|
Use *OL = getOperandList();
|
2005-01-29 00:35:16 +00:00
|
|
|
|
2011-02-01 09:22:34 +00:00
|
|
|
// Overwrite this case with the end of the list.
|
2012-02-01 07:49:51 +00:00
|
|
|
if (2 + (idx + 1) * 2 != NumOps) {
|
|
|
|
OL[2 + idx * 2] = OL[NumOps - 2];
|
|
|
|
OL[2 + idx * 2 + 1] = OL[NumOps - 1];
|
2005-01-29 00:35:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Nuke the last value.
|
2014-04-09 06:08:46 +00:00
|
|
|
OL[NumOps-2].set(nullptr);
|
|
|
|
OL[NumOps-2+1].set(nullptr);
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(NumOps-2);
|
2017-03-26 02:49:23 +00:00
|
|
|
|
|
|
|
return CaseIt(this, idx);
|
2005-01-29 00:35:16 +00:00
|
|
|
}
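// Because removeCase() fills the hole with the last case, callers that erase
// while iterating should continue from the returned iterator. Illustrative
// sketch (not part of this file); shouldRemove is a placeholder:
//   for (auto I = SI->case_begin(); I != SI->case_end();) {
//     if (shouldRemove(*I))
//       I = SI->removeCase(I);
//     else
//       ++I;
//   }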
|
|
|
|
|
2011-04-01 08:00:58 +00:00
|
|
|
/// growOperands - This grows the operand list in response
|
|
|
|
/// to a push_back style of operation. It triples the reserved operand space.
|
2005-01-29 00:35:16 +00:00
|
|
|
///
|
2011-04-01 08:00:58 +00:00
|
|
|
void SwitchInst::growOperands() {
|
2008-05-10 08:32:32 +00:00
|
|
|
unsigned e = getNumOperands();
|
2011-04-01 08:00:58 +00:00
|
|
|
unsigned NumOps = e*3;
|
2005-01-29 00:35:16 +00:00
|
|
|
|
|
|
|
ReservedSpace = NumOps;
|
2015-06-10 22:38:41 +00:00
|
|
|
growHungoffUses(ReservedSpace);
|
2005-01-29 00:35:16 +00:00
|
|
|
}
|
|
|
|
|
2019-05-24 04:34:23 +00:00
|
|
|
MDNode *
|
|
|
|
SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
|
|
|
|
if (MDNode *ProfileData = SI.getMetadata(LLVMContext::MD_prof))
|
|
|
|
if (auto *MDName = dyn_cast<MDString>(ProfileData->getOperand(0)))
|
|
|
|
if (MDName->getString() == "branch_weights")
|
|
|
|
return ProfileData;
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
|
2019-09-12 03:41:34 +00:00
|
|
|
assert(Changed && "called only if metadata has changed");
|
2019-05-24 04:34:23 +00:00
|
|
|
|
|
|
|
if (!Weights)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
assert(SI.getNumSuccessors() == Weights->size() &&
|
|
|
|
"num of prof branch_weights must accord with num of successors");
|
|
|
|
|
|
|
|
bool AllZeroes =
|
2020-10-31 00:15:46 -07:00
|
|
|
all_of(Weights.getValue(), [](uint32_t W) { return W == 0; });
|
2019-05-24 04:34:23 +00:00
|
|
|
|
|
|
|
if (AllZeroes || Weights.getValue().size() < 2)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
|
|
|
|
}
|
|
|
|
|
2019-06-04 09:03:39 +00:00
|
|
|
void SwitchInstProfUpdateWrapper::init() {
|
2019-05-24 04:34:23 +00:00
|
|
|
MDNode *ProfileData = getProfBranchWeightsMD(SI);
|
2019-09-12 03:41:34 +00:00
|
|
|
if (!ProfileData)
|
2019-06-04 09:03:39 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
|
2019-09-12 03:41:34 +00:00
|
|
|
llvm_unreachable("number of prof branch_weights metadata operands does "
|
|
|
|
"not correspond to number of succesors");
|
2019-06-04 09:03:39 +00:00
|
|
|
}
|
2019-05-24 04:34:23 +00:00
|
|
|
|
2020-10-31 00:15:46 -07:00
|
|
|
SmallVector<uint32_t, 8> Weights;
|
2019-05-24 04:34:23 +00:00
|
|
|
for (unsigned CI = 1, CE = SI.getNumSuccessors(); CI <= CE; ++CI) {
|
|
|
|
ConstantInt *C = mdconst::extract<ConstantInt>(ProfileData->getOperand(CI));
|
2020-10-31 00:15:46 -07:00
|
|
|
uint32_t CW = C->getValue().getZExtValue();
|
2019-05-24 04:34:23 +00:00
|
|
|
Weights.push_back(CW);
|
|
|
|
}
|
2019-06-04 09:03:39 +00:00
|
|
|
this->Weights = std::move(Weights);
|
2019-05-24 04:34:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
SwitchInst::CaseIt
|
|
|
|
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
|
|
|
|
if (Weights) {
|
|
|
|
assert(SI.getNumSuccessors() == Weights->size() &&
|
|
|
|
"num of prof branch_weights must accord with num of successors");
|
2019-09-12 03:41:34 +00:00
|
|
|
Changed = true;
|
2019-05-24 04:34:23 +00:00
|
|
|
// Copy the last case to the place of the removed one and shrink.
|
|
|
|
// This is tightly coupled with the way SwitchInst::removeCase() removes
|
|
|
|
// the cases in SwitchInst::removeCase(CaseIt).
|
|
|
|
Weights.getValue()[I->getCaseIndex() + 1] = Weights.getValue().back();
|
|
|
|
Weights.getValue().pop_back();
|
|
|
|
}
|
|
|
|
return SI.removeCase(I);
|
|
|
|
}
|
|
|
|
|
|
|
|
void SwitchInstProfUpdateWrapper::addCase(
|
|
|
|
ConstantInt *OnVal, BasicBlock *Dest,
|
|
|
|
SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
|
|
|
|
SI.addCase(OnVal, Dest);
|
|
|
|
|
|
|
|
if (!Weights && W && *W) {
|
2019-09-12 03:41:34 +00:00
|
|
|
Changed = true;
|
2020-10-31 00:15:46 -07:00
|
|
|
Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
|
2019-05-24 04:34:23 +00:00
|
|
|
Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
|
|
|
|
} else if (Weights) {
|
2019-09-12 03:41:34 +00:00
|
|
|
Changed = true;
|
2019-05-24 04:34:23 +00:00
|
|
|
Weights.getValue().push_back(W ? *W : 0);
|
|
|
|
}
|
|
|
|
if (Weights)
|
|
|
|
assert(SI.getNumSuccessors() == Weights->size() &&
|
|
|
|
"num of prof branch_weights must accord with num of successors");
|
|
|
|
}
|
|
|
|
|
|
|
|
SymbolTableList<Instruction>::iterator
|
|
|
|
SwitchInstProfUpdateWrapper::eraseFromParent() {
|
|
|
|
// Instruction is erased. Mark as unchanged to not touch it in the destructor.
|
2019-09-12 03:41:34 +00:00
|
|
|
Changed = false;
|
|
|
|
if (Weights)
|
|
|
|
Weights->resize(0);
|
2019-05-24 04:34:23 +00:00
|
|
|
return SI.eraseFromParent();
|
|
|
|
}
|
|
|
|
|
|
|
|
SwitchInstProfUpdateWrapper::CaseWeightOpt
|
|
|
|
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
|
|
|
|
if (!Weights)
|
|
|
|
return None;
|
|
|
|
return Weights.getValue()[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
void SwitchInstProfUpdateWrapper::setSuccessorWeight(
|
|
|
|
unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
|
2019-09-12 03:41:34 +00:00
|
|
|
if (!W)
|
2019-05-24 04:34:23 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (!Weights && *W)
|
2020-10-31 00:15:46 -07:00
|
|
|
Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
|
2019-05-24 04:34:23 +00:00
|
|
|
|
|
|
|
if (Weights) {
|
|
|
|
auto &OldW = Weights.getValue()[idx];
|
|
|
|
if (*W != OldW) {
|
2019-09-12 03:41:34 +00:00
|
|
|
Changed = true;
|
2019-05-24 04:34:23 +00:00
|
|
|
OldW = *W;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
SwitchInstProfUpdateWrapper::CaseWeightOpt
|
|
|
|
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
|
|
|
|
unsigned idx) {
|
|
|
|
if (MDNode *ProfileData = getProfBranchWeightsMD(SI))
|
2019-06-04 09:03:39 +00:00
|
|
|
if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
|
|
|
|
return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
|
|
|
|
->getValue()
|
|
|
|
.getZExtValue();
|
2019-05-24 04:34:23 +00:00
|
|
|
|
|
|
|
return None;
|
|
|
|
}
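// Illustrative sketch (not part of this file): passes that edit cases while
// keeping !prof branch_weights consistent can go through the wrapper instead
// of the SwitchInst directly, e.g.
//   SwitchInstProfUpdateWrapper SIW(*SI);
//   SIW.addCase(OnVal, DestBB, Weight); // Weight may be None
//   SIW.removeCase(SomeCase);
// SI, OnVal, DestBB, Weight and SomeCase are placeholder names.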
|
|
|
|
|
2009-10-27 19:13:16 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2011-01-16 15:30:52 +00:00
|
|
|
// IndirectBrInst Implementation
|
2009-10-27 19:13:16 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2009-10-28 00:19:10 +00:00
|
|
|
void IndirectBrInst::init(Value *Address, unsigned NumDests) {
|
2010-02-16 11:11:14 +00:00
|
|
|
assert(Address && Address->getType()->isPointerTy() &&
|
2009-10-29 05:53:32 +00:00
|
|
|
"Address of indirectbr must be a pointer");
|
2009-10-27 19:13:16 +00:00
|
|
|
ReservedSpace = 1+NumDests;
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(1);
|
2015-06-10 22:38:46 +00:00
|
|
|
allocHungoffUses(ReservedSpace);
|
|
|
|
|
2015-05-21 22:48:54 +00:00
|
|
|
Op<0>() = Address;
|
2009-10-27 19:13:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-01 08:00:58 +00:00
|
|
|
/// growOperands - This grows the operand list in response
|
|
|
|
/// to a push_back style of operation. It doubles the reserved operand space.
|
2009-10-27 19:13:16 +00:00
|
|
|
///
|
2011-04-01 08:00:58 +00:00
|
|
|
void IndirectBrInst::growOperands() {
|
2009-10-27 19:13:16 +00:00
|
|
|
unsigned e = getNumOperands();
|
2011-04-01 08:00:58 +00:00
|
|
|
unsigned NumOps = e*2;
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2009-10-27 19:13:16 +00:00
|
|
|
ReservedSpace = NumOps;
|
2015-06-10 22:38:41 +00:00
|
|
|
growHungoffUses(ReservedSpace);
|
2009-10-27 19:13:16 +00:00
|
|
|
}
|
|
|
|
|
2009-10-28 00:19:10 +00:00
|
|
|
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
|
|
|
|
Instruction *InsertBefore)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(Address->getContext()),
|
|
|
|
Instruction::IndirectBr, nullptr, 0, InsertBefore) {
|
2009-10-27 19:13:16 +00:00
|
|
|
init(Address, NumCases);
|
|
|
|
}
|
|
|
|
|
2009-10-28 00:19:10 +00:00
|
|
|
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
|
|
|
|
BasicBlock *InsertAtEnd)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(Address->getContext()),
|
|
|
|
Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
|
2009-10-27 19:13:16 +00:00
|
|
|
init(Address, NumCases);
|
|
|
|
}
|
|
|
|
|
2009-10-28 00:19:10 +00:00
|
|
|
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
|
2018-10-19 00:22:37 +00:00
|
|
|
: Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
|
|
|
|
nullptr, IBI.getNumOperands()) {
|
2015-06-10 22:38:46 +00:00
|
|
|
allocHungoffUses(IBI.getNumOperands());
|
2015-06-12 17:48:05 +00:00
|
|
|
Use *OL = getOperandList();
|
|
|
|
const Use *InOL = IBI.getOperandList();
|
2009-10-27 19:13:16 +00:00
|
|
|
for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
|
|
|
|
OL[i] = InOL[i];
|
|
|
|
SubclassOptionalData = IBI.SubclassOptionalData;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// addDestination - Add a destination.
|
|
|
|
///
|
2009-10-28 00:19:10 +00:00
|
|
|
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
|
2015-06-12 17:48:10 +00:00
|
|
|
unsigned OpNo = getNumOperands();
|
2009-10-27 19:13:16 +00:00
|
|
|
if (OpNo+1 > ReservedSpace)
|
2011-04-01 08:00:58 +00:00
|
|
|
growOperands(); // Get more space!
|
2009-10-27 19:13:16 +00:00
|
|
|
// Initialize some new operands.
|
|
|
|
assert(OpNo < ReservedSpace && "Growing didn't work!");
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(OpNo+1);
|
2015-06-12 17:48:05 +00:00
|
|
|
getOperandList()[OpNo] = DestBB;
|
2009-10-27 19:13:16 +00:00
|
|
|
}
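// Illustrative sketch (not part of this file): indirectbr destinations are
// usually blocks whose addresses have been taken, e.g.
//   Value *Addr = BlockAddress::get(TargetBB);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, BB);
//   IBI->addDestination(TargetBB);
// TargetBB and BB are placeholder names.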
|
|
|
|
|
|
|
|
/// removeDestination - This method removes the specified successor from the
|
2009-10-28 00:19:10 +00:00
|
|
|
/// indirectbr instruction.
|
|
|
|
void IndirectBrInst::removeDestination(unsigned idx) {
|
2009-10-27 19:13:16 +00:00
|
|
|
assert(idx < getNumOperands()-1 && "Successor index out of range!");
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2009-10-27 19:13:16 +00:00
|
|
|
unsigned NumOps = getNumOperands();
|
2015-06-12 17:48:05 +00:00
|
|
|
Use *OL = getOperandList();
|
2009-10-27 19:13:16 +00:00
|
|
|
|
|
|
|
// Replace this value with the last one.
|
|
|
|
OL[idx+1] = OL[NumOps-1];
|
2018-07-30 19:41:25 +00:00
|
|
|
|
2009-10-27 19:13:16 +00:00
|
|
|
// Nuke the last value.
|
2014-04-09 06:08:46 +00:00
|
|
|
OL[NumOps-1].set(nullptr);
|
2015-06-12 17:48:10 +00:00
|
|
|
setNumHungOffUseOperands(NumOps-1);
|
2009-10-27 19:13:16 +00:00
|
|
|
}
|
|
|
|
|
2019-11-07 01:17:49 +09:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// FreezeInst Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
FreezeInst::FreezeInst(Value *S,
|
|
|
|
const Twine &Name, Instruction *InsertBefore)
|
|
|
|
: UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
|
|
|
|
setName(Name);
|
|
|
|
}
|
|
|
|
|
|
|
|
FreezeInst::FreezeInst(Value *S,
|
|
|
|
const Twine &Name, BasicBlock *InsertAtEnd)
|
|
|
|
: UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
|
|
|
|
setName(Name);
|
|
|
|
}
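// Illustrative sketch (not part of this file): freeze is used to stop poison
// or undef from propagating, e.g.
//   Value *Frozen = new FreezeInst(MaybePoison, "frozen", InsertPt);
// MaybePoison and InsertPt are placeholder names.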
|
|
|
|
|
2009-10-27 19:13:16 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2015-06-24 20:22:23 +00:00
|
|
|
// cloneImpl() implementations
|
2009-10-27 19:13:16 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2004-10-15 23:52:53 +00:00
|
|
|
// Define these methods here so vtables don't get emitted into every translation
|
|
|
|
// unit that uses these classes.
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return new (getNumOperands()) GetElementPtrInst(*this);
|
2004-10-15 23:52:53 +00:00
|
|
|
}
|
|
|
|
|
2018-11-13 18:15:47 +00:00
|
|
|
UnaryOperator *UnaryOperator::cloneImpl() const {
|
|
|
|
return Create(getOpcode(), Op<0>());
|
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
BinaryOperator *BinaryOperator::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return Create(getOpcode(), Op<0>(), Op<1>());
|
2004-10-15 23:52:53 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
FCmpInst *FCmpInst::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
|
2006-11-20 01:22:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
ICmpInst *ICmpInst::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
|
2008-05-23 00:36:11 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
ExtractValueInst *ExtractValueInst::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return new ExtractValueInst(*this);
|
2009-07-09 23:48:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
InsertValueInst *InsertValueInst::cloneImpl() const {
|
2009-10-27 22:16:29 +00:00
|
|
|
return new InsertValueInst(*this);
|
2009-07-09 23:48:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
AllocaInst *AllocaInst::cloneImpl() const {
|
2019-10-25 22:26:23 +02:00
|
|
|
AllocaInst *Result =
|
|
|
|
new AllocaInst(getAllocatedType(), getType()->getAddressSpace(),
|
2020-05-15 13:23:14 -07:00
|
|
|
getOperand(0), getAlign());
|
2014-04-30 16:12:21 +00:00
|
|
|
Result->setUsedWithInAlloca(isUsedWithInAlloca());
|
2016-04-01 21:41:15 +00:00
|
|
|
Result->setSwiftError(isSwiftError());
|
2014-04-30 16:12:21 +00:00
|
|
|
return Result;
|
2009-07-09 23:48:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
LoadInst *LoadInst::cloneImpl() const {
|
2019-01-14 21:37:53 +00:00
|
|
|
return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
|
2020-04-03 21:56:20 -07:00
|
|
|
getAlign(), getOrdering(), getSyncScopeID());
|
2009-07-09 23:48:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
StoreInst *StoreInst::cloneImpl() const {
|
2020-05-14 14:48:10 -07:00
|
|
|
return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
|
|
|
|
getOrdering(), getSyncScopeID());
|
2009-07-09 23:48:35 +00:00
|
|
|
}
|
|
|
|
|
2015-06-24 20:22:23 +00:00
|
|
|
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
|
2020-07-07 09:54:13 +00:00
|
|
|
AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
|
|
|
|
getOperand(0), getOperand(1), getOperand(2), getAlign(),
|
|
|
|
getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
|
2011-07-28 21:48:00 +00:00
|
|
|
Result->setVolatile(isVolatile());
|
2014-06-13 14:24:07 +00:00
|
|
|
Result->setWeak(isWeak());
|
2011-07-28 21:48:00 +00:00
|
|
|
return Result;
|
|
|
|
}
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
  AtomicRMWInst *Result =
      new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
                        getAlign(), getOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  return Result;
}

FenceInst *FenceInst::cloneImpl() const {
  return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}

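// Each CastInst subclass clones itself by re-running its constructor on the
// original source operand and destination type.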
TruncInst *TruncInst::cloneImpl() const {
  return new TruncInst(getOperand(0), getType());
}

ZExtInst *ZExtInst::cloneImpl() const {
  return new ZExtInst(getOperand(0), getType());
}

SExtInst *SExtInst::cloneImpl() const {
  return new SExtInst(getOperand(0), getType());
}

FPTruncInst *FPTruncInst::cloneImpl() const {
  return new FPTruncInst(getOperand(0), getType());
}

FPExtInst *FPExtInst::cloneImpl() const {
  return new FPExtInst(getOperand(0), getType());
}

UIToFPInst *UIToFPInst::cloneImpl() const {
  return new UIToFPInst(getOperand(0), getType());
}

SIToFPInst *SIToFPInst::cloneImpl() const {
  return new SIToFPInst(getOperand(0), getType());
}

FPToUIInst *FPToUIInst::cloneImpl() const {
  return new FPToUIInst(getOperand(0), getType());
}

FPToSIInst *FPToSIInst::cloneImpl() const {
  return new FPToSIInst(getOperand(0), getType());
}

PtrToIntInst *PtrToIntInst::cloneImpl() const {
  return new PtrToIntInst(getOperand(0), getType());
}

IntToPtrInst *IntToPtrInst::cloneImpl() const {
  return new IntToPtrInst(getOperand(0), getType());
}

BitCastInst *BitCastInst::cloneImpl() const {
  return new BitCastInst(getOperand(0), getType());
}

AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
  return new AddrSpaceCastInst(getOperand(0), getType());
}

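// Calls that carry operand bundles co-allocate space for their bundle
// descriptors, so the clone must pass the same number of descriptor bytes to
// operator new as the original allocation did.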
CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) CallInst(*this);
  }
  return new(getNumOperands()) CallInst(*this);
}

SelectInst *SelectInst::cloneImpl() const {
  return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

VAArgInst *VAArgInst::cloneImpl() const {
  return new VAArgInst(getOperand(0), getType());
}

ExtractElementInst *ExtractElementInst::cloneImpl() const {
  return ExtractElementInst::Create(getOperand(0), getOperand(1));
}

InsertElementInst *InsertElementInst::cloneImpl() const {
  return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
}

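// The shuffle mask is stored on the instruction itself rather than as an
// operand, so it is handed to the constructor explicitly via getShuffleMask().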
ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
  return new ShuffleVectorInst(getOperand(0), getOperand(1), getShuffleMask());
}

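// PHINode and LandingPadInst keep their operands in separately allocated
// (hung-off) use lists, so cloning goes through their copy constructors.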
PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }

LandingPadInst *LandingPadInst::cloneImpl() const {
  return new LandingPadInst(*this);
}

ReturnInst *ReturnInst::cloneImpl() const {
  return new(getNumOperands()) ReturnInst(*this);
}

BranchInst *BranchInst::cloneImpl() const {
  return new(getNumOperands()) BranchInst(*this);
}

SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }

IndirectBrInst *IndirectBrInst::cloneImpl() const {
  return new IndirectBrInst(*this);
}

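// InvokeInst and CallBrInst follow the same pattern as CallInst: clones of
// calls carrying operand bundles reserve the extra descriptor bytes.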
InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
  }
  return new(getNumOperands()) InvokeInst(*this);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
    return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
  }
  return new (getNumOperands()) CallBrInst(*this);
}

ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }

CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
  return new (getNumOperands()) CleanupReturnInst(*this);
}

CatchReturnInst *CatchReturnInst::cloneImpl() const {
  return new (getNumOperands()) CatchReturnInst(*this);
}

CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
  return new CatchSwitchInst(*this);
}

FuncletPadInst *FuncletPadInst::cloneImpl() const {
  return new (getNumOperands()) FuncletPadInst(*this);
}

UnreachableInst *UnreachableInst::cloneImpl() const {
  LLVMContext &Context = getContext();
  return new UnreachableInst(Context);
}

FreezeInst *FreezeInst::cloneImpl() const {
  return new FreezeInst(getOperand(0));
}