2014-02-26 04:10:45 +01:00
|
|
|
//===-- IntrinsicInst.cpp - Intrinsic Instruction Wrappers ----------------===//
|
2006-03-23 19:05:12 +01:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2006-03-23 19:05:12 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2006-03-24 11:00:56 +01:00
|
|
|
//
|
|
|
|
// This file implements methods that make it really easy to deal with intrinsic
|
2010-01-05 02:10:40 +01:00
|
|
|
// functions.
|
2006-03-24 11:00:56 +01:00
|
|
|
//
|
|
|
|
// All intrinsic function calls are instances of the call instruction, so these
|
|
|
|
// are all subclasses of the CallInst class. Note that none of these classes
|
|
|
|
// has state or virtual methods, which is an important part of this gross/neat
|
|
|
|
// hack working.
|
2017-08-24 06:18:24 +02:00
|
|
|
//
|
2006-03-24 11:00:56 +01:00
|
|
|
// In some cases, arguments to intrinsics need to be generic and are defined as
|
|
|
|
// type pointer to empty struct { }*. To access the real item of interest the
|
2017-08-24 06:18:24 +02:00
|
|
|
// cast instruction needs to be stripped away.
|
2006-03-24 11:00:56 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2006-03-23 19:05:12 +01:00
|
|
|
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Constants.h"
|
2018-06-15 15:48:55 +02:00
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/Metadata.h"
|
2016-09-20 21:07:22 +02:00
|
|
|
#include "llvm/IR/Module.h"
|
2020-04-15 12:05:07 +02:00
|
|
|
#include "llvm/IR/Operator.h"
|
2020-03-17 14:52:06 +01:00
|
|
|
#include "llvm/IR/PatternMatch.h"
|
2021-04-06 17:32:13 +02:00
|
|
|
#include "llvm/IR/Statepoint.h"
|
2020-03-17 14:52:06 +01:00
|
|
|
|
2016-01-26 23:33:19 +01:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2006-03-23 19:05:12 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2018-08-06 05:59:47 +02:00
|
|
|
/// DbgVariableIntrinsic - This is the common base class for debug info
|
|
|
|
/// intrinsics for variables.
|
2006-03-23 19:05:12 +01:00
|
|
|
///
|
|
|
|
|
2020-09-30 17:30:14 +02:00
|
|
|
/// Return an iterator range over the location operands of this debug
/// intrinsic: a one-element range for a plain ValueAsMetadata location, the
/// argument list for a DIArgList location, and an empty range when the
/// location has been dropped (empty metadata tuple).
iterator_range<DbgVariableIntrinsic::location_op_iterator>
DbgVariableIntrinsic::location_ops() const {
  Metadata *RawLoc = getRawLocation();
  assert(RawLoc && "First operand of DbgVariableIntrinsic should be non-null.");

  // A single ValueAsMetadata operand yields a one-element range.
  if (auto *SingleLoc = dyn_cast<ValueAsMetadata>(RawLoc))
    return {location_op_iterator(SingleLoc),
            location_op_iterator(SingleLoc + 1)};

  // A DIArgList yields a range over all of its arguments.
  if (auto *ArgList = dyn_cast<DIArgList>(RawLoc))
    return {location_op_iterator(ArgList->args_begin()),
            location_op_iterator(ArgList->args_end())};

  // Otherwise the operand is an empty metadata tuple; yield an empty range.
  auto *Empty = static_cast<ValueAsMetadata *>(nullptr);
  return {location_op_iterator(Empty), location_op_iterator(Empty)};
}
|
|
|
|
|
|
|
|
/// Fetch the OpIdx'th location operand, or nullptr when the location has
/// been dropped (i.e. the raw location is a non-DIArgList MDNode).
Value *DbgVariableIntrinsic::getVariableLocationOp(unsigned OpIdx) const {
  Metadata *RawLoc = getRawLocation();
  assert(RawLoc && "First operand of DbgVariableIntrinsic should be non-null.");
  // Multiple locations: index into the DIArgList.
  if (auto *ArgList = dyn_cast<DIArgList>(RawLoc))
    return ArgList->getArgs()[OpIdx]->getValue();
  // Any other MDNode means the location was removed; there is no value.
  if (isa<MDNode>(RawLoc))
    return nullptr;
  // Single location: the raw location wraps the value directly, so only
  // index 0 is meaningful.
  auto *SingleLoc = dyn_cast<ValueAsMetadata>(RawLoc);
  assert(
      SingleLoc &&
      "Attempted to get location operand from DbgVariableIntrinsic with none.");
  assert(OpIdx == 0 && "Operand Index must be 0 for a debug intrinsic with a "
                       "single location operand.");
  return SingleLoc->getValue();
}
|
2016-03-29 20:56:03 +02:00
|
|
|
|
2020-09-30 17:30:14 +02:00
|
|
|
/// Wrap \p V as ValueAsMetadata. If \p V already bridges metadata into the
/// Value hierarchy, unwrap it instead (yielding nullptr when the wrapped
/// metadata is not a ValueAsMetadata).
static ValueAsMetadata *getAsMetadata(Value *V) {
  if (auto *MAV = dyn_cast<MetadataAsValue>(V))
    return dyn_cast<ValueAsMetadata>(MAV->getMetadata());
  return ValueAsMetadata::get(V);
}
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-09 19:38:53 +01:00
|
|
|
|
2020-09-30 17:30:14 +02:00
|
|
|
void DbgVariableIntrinsic::replaceVariableLocationOp(Value *OldValue,
|
|
|
|
Value *NewValue) {
|
|
|
|
assert(NewValue && "Values must be non-null");
|
|
|
|
auto Locations = location_ops();
|
|
|
|
auto OldIt = find(Locations, OldValue);
|
|
|
|
assert(OldIt != Locations.end() && "OldValue must be a current location");
|
|
|
|
if (!hasArgList()) {
|
|
|
|
Value *NewOperand = isa<MetadataAsValue>(NewValue)
|
|
|
|
? NewValue
|
|
|
|
: MetadataAsValue::get(
|
|
|
|
getContext(), ValueAsMetadata::get(NewValue));
|
|
|
|
return setArgOperand(0, NewOperand);
|
|
|
|
}
|
|
|
|
SmallVector<ValueAsMetadata *, 4> MDs;
|
|
|
|
ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
|
|
|
|
for (auto *VMD : Locations)
|
|
|
|
MDs.push_back(VMD == *OldIt ? NewOperand : getAsMetadata(VMD));
|
|
|
|
setArgOperand(
|
|
|
|
0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-09 19:38:53 +01:00
|
|
|
}
|
2021-03-17 16:04:27 +01:00
|
|
|
void DbgVariableIntrinsic::replaceVariableLocationOp(unsigned OpIdx,
|
|
|
|
Value *NewValue) {
|
|
|
|
assert(OpIdx < getNumVariableLocationOps() && "Invalid Operand Index");
|
|
|
|
if (!hasArgList()) {
|
|
|
|
Value *NewOperand = isa<MetadataAsValue>(NewValue)
|
|
|
|
? NewValue
|
|
|
|
: MetadataAsValue::get(
|
|
|
|
getContext(), ValueAsMetadata::get(NewValue));
|
|
|
|
return setArgOperand(0, NewOperand);
|
|
|
|
}
|
|
|
|
SmallVector<ValueAsMetadata *, 4> MDs;
|
|
|
|
ValueAsMetadata *NewOperand = getAsMetadata(NewValue);
|
|
|
|
for (unsigned Idx = 0; Idx < getNumVariableLocationOps(); ++Idx)
|
|
|
|
MDs.push_back(Idx == OpIdx ? NewOperand
|
|
|
|
: getAsMetadata(getVariableLocationOp(Idx)));
|
|
|
|
setArgOperand(
|
|
|
|
0, MetadataAsValue::get(getContext(), DIArgList::get(getContext(), MDs)));
|
|
|
|
}
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-09 19:38:53 +01:00
|
|
|
|
2021-05-20 21:17:33 +02:00
|
|
|
/// Append \p NewValues to this intrinsic's location operands, installing
/// \p NewExpr as the new DIExpression. \p NewExpr must reference every
/// location operand of the resulting intrinsic.
void DbgVariableIntrinsic::addVariableLocationOps(ArrayRef<Value *> NewValues,
                                                  DIExpression *NewExpr) {
  assert(NewExpr->hasAllLocationOps(getNumVariableLocationOps() +
                                    NewValues.size()) &&
         "NewExpr for debug variable intrinsic does not reference every "
         "location operand.");
  assert(!is_contained(NewValues, nullptr) && "New values must be non-null");
  // Install the expression first; the location list (operand 0) is rebuilt
  // below and does not depend on operand 2.
  setArgOperand(2, MetadataAsValue::get(getContext(), NewExpr));
  // Gather the existing locations followed by the new ones.
  SmallVector<ValueAsMetadata *, 4> AllLocs;
  for (Value *CurLoc : location_ops())
    AllLocs.push_back(getAsMetadata(CurLoc));
  for (Value *AddedLoc : NewValues)
    AllLocs.push_back(getAsMetadata(AddedLoc));
  setArgOperand(0, MetadataAsValue::get(getContext(),
                                        DIArgList::get(getContext(), AllLocs)));
}
|
|
|
|
|
2018-08-06 05:59:47 +02:00
|
|
|
/// Return the size in bits of the variable piece this intrinsic describes:
/// the fragment's size when the expression carries fragment info, otherwise
/// the size of the whole variable.
Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
  // Prefer the fragment size when the expression describes a fragment...
  auto Fragment = getExpression()->getFragmentInfo();
  if (Fragment)
    return Fragment->SizeInBits;
  // ...otherwise fall back to the size of the entire variable.
  return getVariable()->getSizeInBits();
}
|
|
|
|
|
2016-01-26 23:33:19 +01:00
|
|
|
/// Look up \p Name in \p NameTable (a lexicographically sorted table of
/// intrinsic names) and return the index of the matching entry, or -1 when
/// there is no match. An entry matches when it equals \p Name exactly, or is
/// a prefix of \p Name ending at a '.' (i.e. \p Name is an overloaded
/// variant of the table entry, e.g. "llvm.foo.p0i8" matching "llvm.foo").
int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                               StringRef Name) {
  assert(Name.startswith("llvm."));

  // Do successive binary searches of the dotted name components. For
  // "llvm.gc.experimental.statepoint.p1i8.p1i32", we will find the range of
  // intrinsics starting with "llvm.gc", then "llvm.gc.experimental", then
  // "llvm.gc.experimental.statepoint", and then we will stop as the range is
  // size 1. During the search, we can skip the prefix that we already know is
  // identical. By using strncmp we consider names with differing suffixes to
  // be part of the equal range.
  size_t CmpEnd = 4; // Skip the "llvm" component.
  const char *const *Low = NameTable.begin();
  const char *const *High = NameTable.end();
  // LastLow tracks the start of the last non-empty range, so a longest-prefix
  // candidate survives even when the final narrowing empties the range.
  const char *const *LastLow = Low;
  while (CmpEnd < Name.size() && High - Low > 0) {
    size_t CmpStart = CmpEnd;
    CmpEnd = Name.find('.', CmpStart + 1);
    CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
    // Compare only the current dotted component [CmpStart, CmpEnd).
    auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
      return strncmp(LHS + CmpStart, RHS + CmpStart, CmpEnd - CmpStart) < 0;
    };
    LastLow = Low;
    std::tie(Low, High) = std::equal_range(Low, High, Name.data(), Cmp);
  }
  if (High - Low > 0)
    LastLow = Low;

  if (LastLow == NameTable.end())
    return -1;
  StringRef NameFound = *LastLow;
  // Accept an exact match, or a table entry that is a '.'-terminated prefix
  // of the queried name (an overloaded intrinsic).
  if (Name == NameFound ||
      (Name.startswith(NameFound) && Name[NameFound.size()] == '.'))
    return LastLow - NameTable.begin();
  return -1;
}
|
2016-09-20 21:07:22 +02:00
|
|
|
|
|
|
|
/// Return the step value of this profile-counter increment: the explicit
/// fifth argument for the ".step" variant, or a constant i64 1 otherwise.
Value *InstrProfIncrementInst::getStep() const {
  // The ".step" variant carries the step as its fifth operand.
  if (InstrProfIncrementInstStep::classof(this))
    return const_cast<Value *>(getArgOperand(4));
  // Plain increments implicitly step by one.
  LLVMContext &Context = getModule()->getContext();
  return ConstantInt::get(Type::getInt64Ty(Context), 1);
}
|
2017-01-27 00:27:59 +01:00
|
|
|
|
2020-03-26 08:51:09 +01:00
|
|
|
/// Decode the rounding-mode metadata argument (the second-to-last operand).
/// Returns None when that operand is not a metadata string.
Optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
  unsigned NumArgs = getNumArgOperands();
  Metadata *MD = nullptr;
  if (auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumArgs - 2)))
    MD = MAV->getMetadata();
  auto *ModeStr = dyn_cast_or_null<MDString>(MD);
  if (!ModeStr)
    return None;
  return StrToRoundingMode(ModeStr->getString());
}
|
2017-01-27 00:27:59 +01:00
|
|
|
|
2019-08-29 14:29:11 +02:00
|
|
|
/// Decode the exception-behavior metadata argument (the last operand).
/// Returns None when that operand is not a metadata string.
Optional<fp::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
  unsigned NumArgs = getNumArgOperands();
  Metadata *MD = nullptr;
  if (auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumArgs - 1)))
    MD = MAV->getMetadata();
  auto *BehaviorStr = dyn_cast_or_null<MDString>(MD);
  if (!BehaviorStr)
    return None;
  return StrToExceptionBehavior(BehaviorStr->getString());
}
|
|
|
|
|
2021-05-20 20:35:17 +02:00
|
|
|
bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
|
|
|
|
Optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
|
|
|
|
if (Except) {
|
|
|
|
if (Except.getValue() != fp::ebIgnore)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Optional<RoundingMode> Rounding = getRoundingMode();
|
|
|
|
if (Rounding) {
|
|
|
|
if (Rounding.getValue() != RoundingMode::NearestTiesToEven)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-04-15 12:05:07 +02:00
|
|
|
/// Decode the condition-code metadata string (third operand) into an fcmp
/// predicate. Returns BAD_FCMP_PREDICATE when the operand is not one of the
/// recognized predicate strings; note the "true" and "false" predicates are
/// not in the table and therefore also yield BAD_FCMP_PREDICATE.
FCmpInst::Predicate ConstrainedFPCmpIntrinsic::getPredicate() const {
  Metadata *MD = cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  if (!MD || !isa<MDString>(MD))
    return FCmpInst::BAD_FCMP_PREDICATE;
  return StringSwitch<FCmpInst::Predicate>(cast<MDString>(MD)->getString())
      .Case("oeq", FCmpInst::FCMP_OEQ)
      .Case("ogt", FCmpInst::FCMP_OGT)
      .Case("oge", FCmpInst::FCMP_OGE)
      .Case("olt", FCmpInst::FCMP_OLT)
      .Case("ole", FCmpInst::FCMP_OLE)
      .Case("one", FCmpInst::FCMP_ONE)
      .Case("ord", FCmpInst::FCMP_ORD)
      .Case("uno", FCmpInst::FCMP_UNO)
      .Case("ueq", FCmpInst::FCMP_UEQ)
      .Case("ugt", FCmpInst::FCMP_UGT)
      .Case("uge", FCmpInst::FCMP_UGE)
      .Case("ult", FCmpInst::FCMP_ULT)
      .Case("ule", FCmpInst::FCMP_ULE)
      .Case("une", FCmpInst::FCMP_UNE)
      .Default(FCmpInst::BAD_FCMP_PREDICATE);
}
|
|
|
|
|
2017-05-25 23:31:00 +02:00
|
|
|
/// Whether this constrained intrinsic takes exactly one operand, as recorded
/// in the NARG column of ConstrainedOps.def.
bool ConstrainedFPIntrinsic::isUnaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
// Expand one "return NARG == 1" case per constrained operation.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 1;
#include "llvm/IR/ConstrainedOps.def"
  }
}
|
2017-08-24 06:18:24 +02:00
|
|
|
|
|
|
|
/// Whether this constrained intrinsic takes exactly three operands, as
/// recorded in the NARG column of ConstrainedOps.def.
bool ConstrainedFPIntrinsic::isTernaryOp() const {
  switch (getIntrinsicID()) {
  default:
    return false;
// Expand one "return NARG == 3" case per constrained operation.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    return NARG == 3;
#include "llvm/IR/ConstrainedOps.def"
  }
}
|
|
|
|
|
|
|
|
/// isa/dyn_cast support: an IntrinsicInst is a ConstrainedFPIntrinsic iff
/// its intrinsic ID appears in ConstrainedOps.def.
bool ConstrainedFPIntrinsic::classof(const IntrinsicInst *I) {
  switch (I->getIntrinsicID()) {
// Expand one case label per constrained operation; all fall through to the
// "return true" below.
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return true;
  default:
    return false;
  }
}
|
|
|
|
|
2020-03-17 14:52:06 +01:00
|
|
|
/// Return the static element count of this VP operation, taken from the
/// vector type of its mask parameter.
ElementCount VPIntrinsic::getStaticVectorLength() const {
  Value *MaskParam = getMaskParam();
  assert(MaskParam && "No mask param?");
  // The mask is always a vector whose element count matches the operation.
  auto *MaskTy = cast<VectorType>(MaskParam->getType());
  return MaskTy->getElementCount();
}
|
|
|
|
|
|
|
|
/// Return this VP intrinsic's mask operand, or nullptr when its intrinsic ID
/// has no registered mask position.
Value *VPIntrinsic::getMaskParam() const {
  auto MaskPos = getMaskParamPos(getIntrinsicID());
  if (!MaskPos)
    return nullptr;
  return getArgOperand(MaskPos.getValue());
}
|
|
|
|
|
2021-04-30 13:43:48 +02:00
|
|
|
/// Overwrite this VP intrinsic's mask operand with \p NewMask.
/// Must only be called on intrinsics with a registered mask position.
void VPIntrinsic::setMaskParam(Value *NewMask) {
  auto MaskPos = getMaskParamPos(getIntrinsicID());
  // Guard the Optional dereference: every VP intrinsic registers a mask
  // position, but dereferencing an empty Optional would be UB.
  assert(MaskPos && "No mask param?");
  setArgOperand(*MaskPos, NewMask);
}
|
|
|
|
|
2020-03-17 14:52:06 +01:00
|
|
|
/// Return this VP intrinsic's explicit-vector-length (EVL) operand, or
/// nullptr when its intrinsic ID has no registered EVL position.
Value *VPIntrinsic::getVectorLengthParam() const {
  auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  if (!EVLPos)
    return nullptr;
  return getArgOperand(EVLPos.getValue());
}
|
|
|
|
|
2021-04-30 13:43:48 +02:00
|
|
|
/// Overwrite this VP intrinsic's explicit-vector-length operand with
/// \p NewEVL. Must only be called on intrinsics with a registered EVL
/// position.
void VPIntrinsic::setVectorLengthParam(Value *NewEVL) {
  auto EVLPos = getVectorLengthParamPos(getIntrinsicID());
  // Guard the Optional dereference: dereferencing an empty Optional (an
  // intrinsic without an EVL position) would be UB.
  assert(EVLPos && "No EVL param?");
  setArgOperand(*EVLPos, NewEVL);
}
|
|
|
|
|
2021-05-28 20:28:45 +02:00
|
|
|
/// Return the argument index of the mask operand for \p IntrinsicID, or None
/// when it is not a VP intrinsic. The mapping is generated from the MASKPOS
/// column of VPIntrinsics.def.
Optional<unsigned> VPIntrinsic::getMaskParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
|
|
|
|
|
2021-05-28 20:28:45 +02:00
|
|
|
/// Return the argument index of the explicit-vector-length operand for
/// \p IntrinsicID, or None when it is not a VP intrinsic. The mapping is
/// generated from the VLENPOS column of VPIntrinsics.def.
Optional<unsigned>
VPIntrinsic::getVectorLengthParamPos(Intrinsic::ID IntrinsicID) {
  switch (IntrinsicID) {
  default:
    return None;

#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    return VLENPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}
|
|
|
|
|
2021-05-28 20:28:45 +02:00
|
|
|
/// \return true iff \p ID is one of the vector-predicated intrinsics
/// registered in VPIntrinsics.def.
bool VPIntrinsic::isVPIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  default:
    return false;

// Every registered VP intrinsic becomes a 'case' that falls through to the
// 'return true' below; anything else hits the default and returns false.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:                                                        \
    break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return true;
}
|
|
|
|
|
|
|
|
// Equivalent non-predicated opcode
/// \return the IR instruction opcode that performs the same operation as the
/// VP intrinsic \p ID (disregarding mask and vector length), or None if \p ID
/// is not a VP intrinsic or has no functional opcode.
Optional<unsigned> VPIntrinsic::getFunctionalOpcodeForVP(Intrinsic::ID ID) {
  Optional<unsigned> FunctionalOC;
  switch (ID) {
  default:
    break;

// For each VP intrinsic, VPIntrinsics.def emits a BEGIN_REGISTER_VP_INTRINSIC
// / (optional) HANDLE_VP_TO_OPC / END_REGISTER_VP_INTRINSIC sequence; the
// three macros together expand to
// 'case Intrinsic::<id>: FunctionalOC = Instruction::<opc>; break;'.
// Intrinsics without a HANDLE_VP_TO_OPC entry leave FunctionalOC unset.
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#define HANDLE_VP_TO_OPC(OPC) FunctionalOC = Instruction::OPC;
#define END_REGISTER_VP_INTRINSIC(...) break;
#include "llvm/IR/VPIntrinsics.def"
  }

  return FunctionalOC;
}
|
|
|
|
|
2021-05-28 20:28:45 +02:00
|
|
|
/// \return the VP intrinsic that corresponds to the IR opcode \p IROPC, or
/// Intrinsic::not_intrinsic if the opcode has no VP counterpart.
Intrinsic::ID VPIntrinsic::getForOpcode(unsigned IROPC) {
  switch (IROPC) {
  default:
    return Intrinsic::not_intrinsic;

// For every VP intrinsic with a functional opcode, VPIntrinsics.def expands
// HANDLE_VP_TO_OPC + END_REGISTER_VP_INTRINSIC into
// 'case Instruction::<opc>: return Intrinsic::<vp-id>;'.
#define HANDLE_VP_TO_OPC(OPC) case Instruction::OPC:
#define END_REGISTER_VP_INTRINSIC(VPID) return Intrinsic::VPID;
#include "llvm/IR/VPIntrinsics.def"
  }
}
|
|
|
|
|
|
|
|
/// \return true when the explicit vector length (EVL) operand of this VP
/// intrinsic provably enables all lanes, i.e. it statically covers the whole
/// vector and a lowering may ignore it.
bool VPIntrinsic::canIgnoreVectorLengthParam() const {
  using namespace PatternMatch;

  ElementCount EC = getStaticVectorLength();

  // No vlen param - no lanes masked-off by it.
  auto *VLParam = getVectorLengthParam();
  if (!VLParam)
    return true;

  // Note that the VP intrinsic causes undefined behavior if the Explicit Vector
  // Length parameter is strictly greater-than the number of vector elements of
  // the operation. This function returns true when this is detected statically
  // in the IR.

  // Check whether "W == vscale * EC.getKnownMinValue()"
  if (EC.isScalable()) {
    // Retrieve the data layout from the parent module; without one we cannot
    // match vscale patterns and conservatively answer false.
    auto ParMod = this->getModule();
    if (!ParMod)
      return false;
    const auto &DL = ParMod->getDataLayout();

    // Match 'VLParam == VScaleFactor * vscale' (either operand order): all
    // lanes are active iff the factor is at least the known minimum element
    // count.
    uint64_t VScaleFactor;
    if (match(VLParam, m_c_Mul(m_ConstantInt(VScaleFactor), m_VScale(DL))))
      return VScaleFactor >= EC.getKnownMinValue();
    // A bare 'vscale' equals 'vscale * 1', so it only covers vectors whose
    // known minimum element count is 1.
    return (EC.getKnownMinValue() == 1) && match(VLParam, m_VScale(DL));
  }

  // Fixed-width vectors: the EVL must be a constant at least as large as the
  // element count.
  auto VLConst = dyn_cast<ConstantInt>(VLParam);
  if (!VLConst)
    return false;

  uint64_t VLNum = VLConst->getZExtValue();
  if (VLNum >= EC.getKnownMinValue())
    return true;

  return false;
}
|
|
|
|
|
2019-05-28 20:08:06 +02:00
|
|
|
/// Map this overflow-checking or saturating arithmetic intrinsic onto the
/// equivalent IR binary operator (Add, Sub or Mul).
Instruction::BinaryOps BinaryOpIntrinsic::getBinaryOp() const {
  const Intrinsic::ID ID = getIntrinsicID();
  if (ID == Intrinsic::uadd_with_overflow ||
      ID == Intrinsic::sadd_with_overflow || ID == Intrinsic::uadd_sat ||
      ID == Intrinsic::sadd_sat)
    return Instruction::Add;
  if (ID == Intrinsic::usub_with_overflow ||
      ID == Intrinsic::ssub_with_overflow || ID == Intrinsic::usub_sat ||
      ID == Intrinsic::ssub_sat)
    return Instruction::Sub;
  if (ID == Intrinsic::umul_with_overflow || ID == Intrinsic::smul_with_overflow)
    return Instruction::Mul;
  llvm_unreachable("Invalid intrinsic");
}
|
|
|
|
|
2019-05-28 20:08:06 +02:00
|
|
|
bool BinaryOpIntrinsic::isSigned() const {
|
2019-04-16 20:55:16 +02:00
|
|
|
switch (getIntrinsicID()) {
|
2020-04-15 12:05:07 +02:00
|
|
|
case Intrinsic::sadd_with_overflow:
|
|
|
|
case Intrinsic::ssub_with_overflow:
|
|
|
|
case Intrinsic::smul_with_overflow:
|
|
|
|
case Intrinsic::sadd_sat:
|
|
|
|
case Intrinsic::ssub_sat:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
return false;
|
2019-04-16 20:55:16 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-28 20:08:06 +02:00
|
|
|
unsigned BinaryOpIntrinsic::getNoWrapKind() const {
|
2019-04-16 20:55:16 +02:00
|
|
|
if (isSigned())
|
|
|
|
return OverflowingBinaryOperator::NoSignedWrap;
|
|
|
|
else
|
|
|
|
return OverflowingBinaryOperator::NoUnsignedWrap;
|
|
|
|
}
|
2021-04-06 17:32:13 +02:00
|
|
|
|
|
|
|
/// \return the statepoint this projection (gc.relocate / gc.result) is tied
/// to, following the token operand back through a landingpad if necessary.
const GCStatepointInst *GCProjectionInst::getStatepoint() const {
  const Value *Token = getArgOperand(0);

  // For call statepoints, and for the normal path of invoke statepoints, the
  // token operand is the statepoint itself.
  if (!isa<LandingPadInst>(Token))
    return cast<GCStatepointInst>(Token);

  // Otherwise this projection sits on the exceptional path of an invoke
  // statepoint: walk from the landingpad's block back to the invoke.
  const BasicBlock *PredBB =
      cast<Instruction>(Token)->getParent()->getUniquePredecessor();
  assert(PredBB && "safepoints should have unique landingpads");
  assert(PredBB->getTerminator() && "safepoint block should be well formed");
  return cast<GCStatepointInst>(PredBB->getTerminator());
}
|
|
|
|
|
|
|
|
/// \return the base pointer this relocate refers to, taken from the
/// statepoint's "gc-live" operand bundle when present, otherwise from its
/// argument list.
Value *GCRelocateInst::getBasePtr() const {
  const GCStatepointInst *Statepoint = getStatepoint();
  const unsigned Idx = getBasePtrIndex();
  if (auto LiveOpt = Statepoint->getOperandBundle(LLVMContext::OB_gc_live))
    return *(LiveOpt->Inputs.begin() + Idx);
  return *(Statepoint->arg_begin() + Idx);
}
|
|
|
|
|
|
|
|
/// \return the derived pointer this relocate refers to, taken from the
/// statepoint's "gc-live" operand bundle when present, otherwise from its
/// argument list.
Value *GCRelocateInst::getDerivedPtr() const {
  const GCStatepointInst *Statepoint = getStatepoint();
  const unsigned Idx = getDerivedPtrIndex();
  if (auto LiveOpt = Statepoint->getOperandBundle(LLVMContext::OB_gc_live))
    return *(LiveOpt->Inputs.begin() + Idx);
  return *(Statepoint->arg_begin() + Idx);
}
|