//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements methods that make it really easy to deal with intrinsic
// functions.
//
// All intrinsic function calls are instances of the call instruction, so these
// are all subclasses of the CallInst class. Note that none of these classes
// has state or virtual methods, which is an important part of this gross/neat
// hack working.
//
// In some cases, arguments to intrinsics need to be generic and are defined as
// type pointer to empty struct { }*. To access the real item of interest the
// cast instruction needs to be stripped away.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
/// DbgInfoIntrinsic - This is the common base class for debug info intrinsics
///
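
/// Return the Value being described by this debug info intrinsic, i.e. the
/// first argument unwrapped from its MetadataAsValue. Returns null when the
/// described value has been optimized away and replaced by an empty MDNode
/// (or, if AllowNullOp is set, when the operand itself is null).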
Value *DbgInfoIntrinsic::getVariableLocation(bool AllowNullOp) const {
  Value *Op = getArgOperand(0);
  if (AllowNullOp && !Op)
    return nullptr;

  auto *MD = cast<MetadataAsValue>(Op)->getMetadata();
  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
    return V->getValue();

  // When the value goes to null, it gets replaced by an empty MDNode.
  assert(!cast<MDNode>(MD)->getNumOperands() && "Expected an empty MDNode");
  return nullptr;
}

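/// Look up Name in the sorted table of intrinsic names and return the index of
/// the matching entry, or -1 if there is no match. A table entry matches either
/// exactly or as a "."-separated prefix of Name, which is how overloaded
/// intrinsics with mangled type suffixes (e.g. "llvm.memcpy.p0i8.p0i8.i64")
/// resolve to their base entry. NameTable must be sorted and every entry must
/// begin with "llvm.".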
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpStart = 0;
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}

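/// Return the step value of an instrprof.increment counter update. The
/// llvm.instrprof.increment.step variant carries the step as its fifth
/// argument; the plain llvm.instrprof.increment form has an implicit step of
/// one, which is materialized here as an i64 constant.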
Value *InstrProfIncrementInst::getStep() const {
  if (InstrProfIncrementInstStep::classof(this)) {
    return const_cast<Value *>(getArgOperand(4));
  }
  const Module *M = getModule();
  LLVMContext &Context = M->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}

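/// Return the rounding mode requested by this constrained FP intrinsic. The
/// mode is encoded as an MDString in the second-to-last argument, e.g. a call
/// such as
///   @llvm.experimental.constrained.fadd.f64(double %a, double %b,
///                                           metadata !"round.dynamic",
///                                           metadata !"fpexcept.strict")
/// yields rmDynamic. Unknown or malformed metadata yields rmInvalid.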
ConstrainedFPIntrinsic::RoundingMode
ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD =
      dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return rmInvalid;
  StringRef RoundingArg = cast<MDString>(MD)->getString();

  // For dynamic rounding mode, we use round to nearest but we will set the
  // 'exact' SDNodeFlag so that the value will not be rounded.
  return StringSwitch<RoundingMode>(RoundingArg)
      .Case("round.dynamic", rmDynamic)
      .Case("round.tonearest", rmToNearest)
      .Case("round.downward", rmDownward)
      .Case("round.upward", rmUpward)
      .Case("round.towardzero", rmTowardZero)
      .Default(rmInvalid);
}

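/// Return the exception behavior requested by this constrained FP intrinsic,
/// encoded as an MDString ("fpexcept.ignore", "fpexcept.maytrap" or
/// "fpexcept.strict") in the last argument. Unknown or malformed metadata
/// yields ebInvalid.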
ConstrainedFPIntrinsic::ExceptionBehavior
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumOperands = getNumArgOperands();
  Metadata *MD =
      dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return ebInvalid;
  StringRef ExceptionArg = cast<MDString>(MD)->getString();
  return StringSwitch<ExceptionBehavior>(ExceptionArg)
      .Case("fpexcept.ignore", ebIgnore)
      .Case("fpexcept.maytrap", ebMayTrap)
      .Case("fpexcept.strict", ebStrict)
      .Default(ebInvalid);
}

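/// Return true if this constrained intrinsic takes a single floating-point
/// operand (plus the trailing rounding-mode and exception-behavior metadata).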
bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::experimental_constrained_sqrt:
  case Intrinsic::experimental_constrained_sin:
  case Intrinsic::experimental_constrained_cos:
  case Intrinsic::experimental_constrained_exp:
  case Intrinsic::experimental_constrained_exp2:
  case Intrinsic::experimental_constrained_log:
  case Intrinsic::experimental_constrained_log10:
  case Intrinsic::experimental_constrained_log2:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_nearbyint:
    return true;
  }
}