2018-03-30 02:47:31 +02:00
|
|
|
//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
|
2009-12-28 22:28:46 +01:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2009-12-28 22:28:46 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the IRBuilder class, which is used as a convenient way
|
|
|
|
// to create LLVM instructions with a consistent and simplified interface.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2018-03-30 02:47:31 +02:00
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
|
|
|
#include "llvm/ADT/None.h"
|
|
|
|
#include "llvm/IR/Constant.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
2018-03-30 02:47:31 +02:00
|
|
|
#include "llvm/IR/GlobalValue.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
Reapply "[IRBuilder] Virtualize IRBuilder"
Relative to the original commit, this fixes some warnings,
and is based on the deletion of the IRBuilder copy constructor
in D74693. The automatic copy constructor would no longer be
safe.
-----
Related llvm-dev thread:
http://lists.llvm.org/pipermail/llvm-dev/2020-February/138951.html
This patch moves the IRBuilder from templating over the constant
folder and inserter towards making both of these virtual.
There are a couple of motivations for this:
1. It's not possible to share code between use-sites that use
different IRBuilder folders/inserters (short of templating the code
and moving it into headers).
2. Methods currently defined on IRBuilderBase (which is not templated)
do not use the custom inserter, resulting in subtle bugs (e.g.
incorrect InstCombine worklist management). It would be possible to
move those into the templated IRBuilder, but...
3. The vast majority of the IRBuilder implementation has to live
in the header, because it depends on the template arguments.
4. We have many unnecessary dependencies on IRBuilder.h,
because it is not easy to forward-declare. (Significant parts of
the backend depend on it via TargetLowering.h, for example.)
This patch addresses the issue by making the following changes:
* IRBuilderDefaultInserter::InsertHelper becomes virtual.
IRBuilderBase accepts a reference to it.
* IRBuilderFolder is introduced as a virtual base class. It is
implemented by ConstantFolder (default), NoFolder and TargetFolder.
IRBuilderBase has a reference to this as well.
* All the logic is moved from IRBuilder to IRBuilderBase. This means
that methods can in the future replace their IRBuilder<> & uses
(or other specific IRBuilder types) with IRBuilderBase & and thus
be usable with different IRBuilders.
* The IRBuilder class is now a thin wrapper around IRBuilderBase.
Essentially it only stores the folder and inserter and takes care
of constructing the base builder.
What this patch doesn't do, but should be simple followups after this change:
* Fixing use of the inserter for creation methods originally defined
on IRBuilderBase.
* Replacing IRBuilder<> uses in arguments with IRBuilderBase, where useful.
* Moving code from the IRBuilder header to the source file.
From the user perspective, these changes should be mostly transparent:
The only thing that consumers using a custom inserter may need to do is
inherit from IRBuilderDefaultInserter publicly and mark their InsertHelper
as public.
Differential Revision: https://reviews.llvm.org/D73835
2020-02-16 17:10:09 +01:00
|
|
|
#include "llvm/IR/NoFolder.h"
|
2020-06-25 13:48:14 +02:00
|
|
|
#include "llvm/IR/Operator.h"
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-08 20:07:42 +02:00
|
|
|
#include "llvm/IR/Statepoint.h"
|
2018-03-30 02:47:31 +02:00
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/MathExtras.h"
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <vector>
|
|
|
|
|
2009-12-28 22:28:46 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
/// CreateGlobalString - Make a new global variable with an initializer that
|
2010-02-10 21:04:19 +01:00
|
|
|
/// has array of i8 type filled in with the nul terminated string value
|
2009-12-28 22:28:46 +01:00
|
|
|
/// specified. If Name is specified, it is the name of the global variable
|
|
|
|
/// created.
|
2015-04-03 23:33:42 +02:00
|
|
|
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
|
2015-06-19 04:12:07 +02:00
|
|
|
const Twine &Name,
|
2020-05-28 16:41:01 +02:00
|
|
|
unsigned AddressSpace,
|
|
|
|
Module *M) {
|
2012-02-05 03:29:43 +01:00
|
|
|
Constant *StrConstant = ConstantDataArray::getString(Context, Str);
|
2020-05-28 16:41:01 +02:00
|
|
|
if (!M)
|
|
|
|
M = BB->getParent()->getParent();
|
|
|
|
auto *GV = new GlobalVariable(
|
|
|
|
*M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
|
|
|
|
StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
|
2016-06-14 23:01:22 +02:00
|
|
|
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
|
[Alignment][NFC] Deprecate Align::None()
Summary:
This is a follow up on https://reviews.llvm.org/D71473#inline-647262.
There's a caveat here that `Align(1)` relies on the compiler understanding of `Log2_64` implementation to produce good code. One could use `Align()` as a replacement but I believe it is less clear that the alignment is one in that case.
Reviewers: xbolva00, courbet, bollu
Subscribers: arsenm, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, jrtc27, atanasyan, jsji, Jim, kerbowa, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73099
2020-01-21 15:00:04 +01:00
|
|
|
GV->setAlignment(Align(1));
|
2009-12-28 22:28:46 +01:00
|
|
|
return GV;
|
|
|
|
}
|
2009-12-28 22:45:40 +01:00
|
|
|
|
2011-07-12 06:14:22 +02:00
|
|
|
/// Return the return type of the function that the builder is currently
/// inserting into. Requires a valid insertion point inside a function.
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  const Function *Fn = BB->getParent();
  return Fn->getReturnType();
}
|
2010-12-26 23:49:25 +01:00
|
|
|
|
|
|
|
/// Return \p Ptr as an i8* in the same address space, inserting a bitcast
/// only when the pointee type differs. Opaque pointers (and pointers that
/// already point to i8) are returned unchanged.
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  if (!PtrTy->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
    return CreateBitCast(Ptr, getInt8PtrTy(PtrTy->getAddressSpace()));
  return Ptr;
}
|
|
|
|
|
2019-02-01 21:43:25 +01:00
|
|
|
/// Emit a call to \p Callee with operands \p Ops through \p Builder,
/// optionally copying fast-math flags from \p FMFSource onto the new call
/// and attaching the given operand bundles.
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr,
                                  ArrayRef<OperandBundleDef> OpBundles = {}) {
  CallInst *Call = Builder->CreateCall(Callee, Ops, OpBundles, Name);
  if (FMFSource != nullptr)
    Call->copyFastMathFlags(FMFSource);
  return Call;
}
|
|
|
|
|
2020-12-08 15:20:04 +01:00
|
|
|
/// Create a value representing vscale * \p Scaling. A zero scaling folds to
/// the zero constant without emitting a call; a scaling of one returns the
/// raw llvm.vscale call; otherwise the call is multiplied by \p Scaling.
Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  auto *ScalingCI = cast<ConstantInt>(Scaling);
  if (ScalingCI->isZero())
    return Scaling;
  Module *M = GetInsertBlock()->getParent()->getParent();
  Function *VScaleFn =
      Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
  CallInst *VScale = createCallHelper(VScaleFn, {}, this, Name);
  if (ScalingCI->getSExtValue() == 1)
    return VScale;
  return CreateMul(VScale, Scaling);
}
|
|
|
|
|
[IR][SVE] Add new llvm.experimental.stepvector intrinsic
This patch adds a new llvm.experimental.stepvector intrinsic,
which takes no arguments and returns a linear integer sequence of
values of the form <0, 1, ...>. It is primarily intended for
scalable vectors, although it will work for fixed width vectors
too. It is intended that later patches will make use of this
new intrinsic when vectorising induction variables, currently only
supported for fixed width. I've added a new CreateStepVector
method to the IRBuilder, which will generate a call to this
intrinsic for scalable vectors and fall back on creating a
ConstantVector for fixed width.
For scalable vectors this intrinsic is lowered to a new ISD node
called STEP_VECTOR, which takes a single constant integer argument
as the step. During lowering this argument is set to a value of 1.
The reason for this additional argument at the codegen level is
because in future patches we will introduce various generic DAG
combines such as
mul step_vector(1), 2 -> step_vector(2)
add step_vector(1), step_vector(1) -> step_vector(2)
shl step_vector(1), 1 -> step_vector(2)
etc.
that encourage a canonical format for all targets. This hopefully
means all other targets supporting scalable vectors can benefit
from this too.
I've added cost model tests for both fixed width and scalable
vectors:
llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
as well as codegen lowering tests for fixed width and scalable
vectors:
llvm/test/CodeGen/AArch64/neon-stepvector.ll
llvm/test/CodeGen/AArch64/sve-stepvector.ll
See this thread for discussion of the intrinsic:
https://lists.llvm.org/pipermail/llvm-dev/2021-January/147943.html
2021-02-08 16:46:24 +01:00
|
|
|
/// Create a vector of \p DstType whose lanes hold the linear sequence
/// <0, 1, ..., N-1>. Scalable vectors are built via the
/// llvm.experimental.stepvector intrinsic; fixed-width vectors are
/// materialized directly as a ConstantVector.
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  if (isa<ScalableVectorType>(DstType))
    return CreateIntrinsic(Intrinsic::experimental_stepvector, {DstType}, {},
                           nullptr, Name);

  // Fixed-width case: build the step sequence out of scalar constants.
  auto *FixedTy = cast<FixedVectorType>(DstType);
  Type *EltTy = DstType->getScalarType();

  SmallVector<Constant *, 8> Steps;
  for (unsigned Idx = 0, E = FixedTy->getNumElements(); Idx != E; ++Idx)
    Steps.push_back(ConstantInt::get(EltTy, Idx));

  return ConstantVector::get(Steps);
}
|
|
|
|
|
2019-12-09 17:36:50 +01:00
|
|
|
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
|
|
|
|
MaybeAlign Align, bool isVolatile,
|
|
|
|
MDNode *TBAATag, MDNode *ScopeTag,
|
|
|
|
MDNode *NoAliasTag) {
|
2010-12-26 23:49:25 +01:00
|
|
|
Ptr = getCastedInt8PtrValue(Ptr);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
|
2011-07-12 16:06:48 +02:00
|
|
|
Type *Tys[] = { Ptr->getType(), Size->getType() };
|
2010-12-26 23:49:25 +01:00
|
|
|
Module *M = BB->getParent()->getParent();
|
2019-02-01 21:43:25 +01:00
|
|
|
Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
|
2018-07-03 14:39:52 +02:00
|
|
|
|
2011-07-15 10:37:34 +02:00
|
|
|
CallInst *CI = createCallHelper(TheFn, Ops, this);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2019-12-09 17:36:50 +01:00
|
|
|
if (Align)
|
|
|
|
cast<MemSetInst>(CI)->setDestAlignment(Align->value());
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2010-12-26 23:49:25 +01:00
|
|
|
// Set the TBAA info if present.
|
|
|
|
if (TBAATag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
|
Add scoped-noalias metadata
This commit adds scoped noalias metadata. The primary motivations for this
feature are:
1. To preserve noalias function attribute information when inlining
2. To provide the ability to model block-scope C99 restrict pointers
Neither of these two abilities are added here, only the necessary
infrastructure. In fact, there should be no change to existing functionality,
only the addition of new features. The logic that converts noalias function
parameters into this metadata during inlining will come in a follow-up commit.
What is added here is the ability to generally specify noalias memory-access
sets. Regarding the metadata, alias-analysis scopes are defined similar to TBAA
nodes:
!scope0 = metadata !{ metadata !"scope of foo()" }
!scope1 = metadata !{ metadata !"scope 1", metadata !scope0 }
!scope2 = metadata !{ metadata !"scope 2", metadata !scope0 }
!scope3 = metadata !{ metadata !"scope 2.1", metadata !scope2 }
!scope4 = metadata !{ metadata !"scope 2.2", metadata !scope2 }
Loads and stores can be tagged with an alias-analysis scope, and also, with a
noalias tag for a specific scope:
... = load %ptr1, !alias.scope !{ !scope1 }
... = load %ptr2, !alias.scope !{ !scope1, !scope2 }, !noalias !{ !scope1 }
When evaluating an aliasing query, if one of the instructions is associated
with an alias.scope id that is identical to the noalias scope associated with
the other instruction, or is a descendant (in the scope hierarchy) of the
noalias scope associated with the other instruction, then the two memory
accesses are assumed not to alias.
Note that if the first element of the scope metadata is a string, then it can
be combined across functions and translation units. The string can be replaced
by a self-reference to create globally unique scope identifiers.
[Note: This overview is slightly stylized, since the metadata nodes really need
to just be numbers (!0 instead of !scope0), and the scope lists are also global
unnamed metadata.]
Existing noalias metadata in a callee is "cloned" for use by the inlined code.
This is necessary because the aliasing scopes are unique to each call site
(because of possible control dependencies on the aliasing properties). For
example, consider a function: foo(noalias a, noalias b) { *a = *b; } that gets
inlined into bar() { ... if (...) foo(a1, b1); ... if (...) foo(a2, b2); } --
now just because we know that a1 does not alias with b1 at the first call site,
and a2 does not alias with b2 at the second call site, we cannot let inlining
these functions have the metadata imply that a1 does not alias with b2.
llvm-svn: 213864
2014-07-24 16:25:39 +02:00
|
|
|
|
|
|
|
if (ScopeTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
|
2018-07-03 14:39:52 +02:00
|
|
|
|
Add scoped-noalias metadata
This commit adds scoped noalias metadata. The primary motivations for this
feature are:
1. To preserve noalias function attribute information when inlining
2. To provide the ability to model block-scope C99 restrict pointers
Neither of these two abilities are added here, only the necessary
infrastructure. In fact, there should be no change to existing functionality,
only the addition of new features. The logic that converts noalias function
parameters into this metadata during inlining will come in a follow-up commit.
What is added here is the ability to generally specify noalias memory-access
sets. Regarding the metadata, alias-analysis scopes are defined similar to TBAA
nodes:
!scope0 = metadata !{ metadata !"scope of foo()" }
!scope1 = metadata !{ metadata !"scope 1", metadata !scope0 }
!scope2 = metadata !{ metadata !"scope 2", metadata !scope0 }
!scope3 = metadata !{ metadata !"scope 2.1", metadata !scope2 }
!scope4 = metadata !{ metadata !"scope 2.2", metadata !scope2 }
Loads and stores can be tagged with an alias-analysis scope, and also, with a
noalias tag for a specific scope:
... = load %ptr1, !alias.scope !{ !scope1 }
... = load %ptr2, !alias.scope !{ !scope1, !scope2 }, !noalias !{ !scope1 }
When evaluating an aliasing query, if one of the instructions is associated
with an alias.scope id that is identical to the noalias scope associated with
the other instruction, or is a descendant (in the scope hierarchy) of the
noalias scope associated with the other instruction, then the two memory
accesses are assumed not to alias.
Note that if the first element of the scope metadata is a string, then it can
be combined across functions and translation units. The string can be replaced
by a self-reference to create globally unique scope identifiers.
[Note: This overview is slightly stylized, since the metadata nodes really need
to just be numbers (!0 instead of !scope0), and the scope lists are also global
unnamed metadata.]
Existing noalias metadata in a callee is "cloned" for use by the inlined code.
This is necessary because the aliasing scopes are unique to each call site
(because of possible control dependencies on the aliasing properties). For
example, consider a function: foo(noalias a, noalias b) { *a = *b; } that gets
inlined into bar() { ... if (...) foo(a1, b1); ... if (...) foo(a2, b2); } --
now just because we know that a1 does not alias with b1 at the first call site,
and a2 does not alias with b2 at the second call site, we cannot let inlining
these functions have the metadata imply that a1 does not alias with b2.
llvm-svn: 213864
2014-07-24 16:25:39 +02:00
|
|
|
if (NoAliasTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2010-12-26 23:49:25 +01:00
|
|
|
return CI;
|
|
|
|
}
|
|
|
|
|
2018-05-30 22:02:56 +02:00
|
|
|
/// Create a call to the @llvm.memset.element.unordered.atomic intrinsic.
///
/// \param Ptr         destination pointer; bitcast to i8* before use.
/// \param Val         value operand passed to the intrinsic.
/// \param Size        total size operand, in bytes.
/// \param Alignment   destination alignment recorded on the call's dest
///                    argument via setDestAlignment().
/// \param ElementSize per-element size, emitted as an i32 operand.
/// \param TBAATag     optional !tbaa metadata to attach (may be null).
/// \param ScopeTag    optional !alias.scope metadata to attach (may be null).
/// \param NoAliasTag  optional !noalias metadata to attach (may be null).
/// \returns the newly created CallInst.
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  // The intrinsic is overloaded on the destination pointer type and the
  // size type.
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Alignment is mandatory for this builder (non-optional Align parameter),
  // so it is always recorded on the call.
  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
|
|
|
|
|
2020-10-05 19:50:17 +02:00
|
|
|
CallInst *IRBuilderBase::CreateMemTransferInst(
|
|
|
|
Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
|
|
|
|
MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
|
|
|
|
MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
|
2010-12-26 23:49:25 +01:00
|
|
|
Dst = getCastedInt8PtrValue(Dst);
|
|
|
|
Src = getCastedInt8PtrValue(Src);
|
|
|
|
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
|
2011-07-12 16:06:48 +02:00
|
|
|
Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
|
2010-12-26 23:49:25 +01:00
|
|
|
Module *M = BB->getParent()->getParent();
|
2020-10-05 19:50:17 +02:00
|
|
|
Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
|
2018-07-03 14:39:52 +02:00
|
|
|
|
2011-07-15 10:37:34 +02:00
|
|
|
CallInst *CI = createCallHelper(TheFn, Ops, this);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2020-10-05 19:50:17 +02:00
|
|
|
auto* MCI = cast<MemTransferInst>(CI);
|
2019-12-12 15:32:19 +01:00
|
|
|
if (DstAlign)
|
|
|
|
MCI->setDestAlignment(*DstAlign);
|
|
|
|
if (SrcAlign)
|
|
|
|
MCI->setSourceAlignment(*SrcAlign);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2010-12-26 23:49:25 +01:00
|
|
|
// Set the TBAA info if present.
|
|
|
|
if (TBAATag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
|
2012-09-27 00:17:14 +02:00
|
|
|
|
|
|
|
// Set the TBAA Struct info if present.
|
|
|
|
if (TBAAStructTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
|
2018-07-03 14:39:52 +02:00
|
|
|
|
Add scoped-noalias metadata
This commit adds scoped noalias metadata. The primary motivations for this
feature are:
1. To preserve noalias function attribute information when inlining
2. To provide the ability to model block-scope C99 restrict pointers
Neither of these two abilities are added here, only the necessary
infrastructure. In fact, there should be no change to existing functionality,
only the addition of new features. The logic that converts noalias function
parameters into this metadata during inlining will come in a follow-up commit.
What is added here is the ability to generally specify noalias memory-access
sets. Regarding the metadata, alias-analysis scopes are defined similar to TBAA
nodes:
!scope0 = metadata !{ metadata !"scope of foo()" }
!scope1 = metadata !{ metadata !"scope 1", metadata !scope0 }
!scope2 = metadata !{ metadata !"scope 2", metadata !scope0 }
!scope3 = metadata !{ metadata !"scope 2.1", metadata !scope2 }
!scope4 = metadata !{ metadata !"scope 2.2", metadata !scope2 }
Loads and stores can be tagged with an alias-analysis scope, and also, with a
noalias tag for a specific scope:
... = load %ptr1, !alias.scope !{ !scope1 }
... = load %ptr2, !alias.scope !{ !scope1, !scope2 }, !noalias !{ !scope1 }
When evaluating an aliasing query, if one of the instructions is associated
with an alias.scope id that is identical to the noalias scope associated with
the other instruction, or is a descendant (in the scope hierarchy) of the
noalias scope associated with the other instruction, then the two memory
accesses are assumed not to alias.
Note that is the first element of the scope metadata is a string, then it can
be combined accross functions and translation units. The string can be replaced
by a self-reference to create globally unqiue scope identifiers.
[Note: This overview is slightly stylized, since the metadata nodes really need
to just be numbers (!0 instead of !scope0), and the scope lists are also global
unnamed metadata.]
Existing noalias metadata in a callee is "cloned" for use by the inlined code.
This is necessary because the aliasing scopes are unique to each call site
(because of possible control dependencies on the aliasing properties). For
example, consider a function: foo(noalias a, noalias b) { *a = *b; } that gets
inlined into bar() { ... if (...) foo(a1, b1); ... if (...) foo(a2, b2); } --
now just because we know that a1 does not alias with b1 at the first call site,
and a2 does not alias with b2 at the second call site, we cannot let inlining
these functons have the metadata imply that a1 does not alias with b2.
llvm-svn: 213864
2014-07-24 16:25:39 +02:00
|
|
|
if (ScopeTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
|
2018-07-03 14:39:52 +02:00
|
|
|
|
Add scoped-noalias metadata
This commit adds scoped noalias metadata. The primary motivations for this
feature are:
1. To preserve noalias function attribute information when inlining
2. To provide the ability to model block-scope C99 restrict pointers
Neither of these two abilities are added here, only the necessary
infrastructure. In fact, there should be no change to existing functionality,
only the addition of new features. The logic that converts noalias function
parameters into this metadata during inlining will come in a follow-up commit.
What is added here is the ability to generally specify noalias memory-access
sets. Regarding the metadata, alias-analysis scopes are defined similar to TBAA
nodes:
!scope0 = metadata !{ metadata !"scope of foo()" }
!scope1 = metadata !{ metadata !"scope 1", metadata !scope0 }
!scope2 = metadata !{ metadata !"scope 2", metadata !scope0 }
!scope3 = metadata !{ metadata !"scope 2.1", metadata !scope2 }
!scope4 = metadata !{ metadata !"scope 2.2", metadata !scope2 }
Loads and stores can be tagged with an alias-analysis scope, and also, with a
noalias tag for a specific scope:
... = load %ptr1, !alias.scope !{ !scope1 }
... = load %ptr2, !alias.scope !{ !scope1, !scope2 }, !noalias !{ !scope1 }
When evaluating an aliasing query, if one of the instructions is associated
with an alias.scope id that is identical to the noalias scope associated with
the other instruction, or is a descendant (in the scope hierarchy) of the
noalias scope associated with the other instruction, then the two memory
accesses are assumed not to alias.
Note that is the first element of the scope metadata is a string, then it can
be combined accross functions and translation units. The string can be replaced
by a self-reference to create globally unqiue scope identifiers.
[Note: This overview is slightly stylized, since the metadata nodes really need
to just be numbers (!0 instead of !scope0), and the scope lists are also global
unnamed metadata.]
Existing noalias metadata in a callee is "cloned" for use by the inlined code.
This is necessary because the aliasing scopes are unique to each call site
(because of possible control dependencies on the aliasing properties). For
example, consider a function: foo(noalias a, noalias b) { *a = *b; } that gets
inlined into bar() { ... if (...) foo(a1, b1); ... if (...) foo(a2, b2); } --
now just because we know that a1 does not alias with b1 at the first call site,
and a2 does not alias with b2 at the second call site, we cannot let inlining
these functons have the metadata imply that a1 does not alias with b2.
llvm-svn: 213864
2014-07-24 16:25:39 +02:00
|
|
|
if (NoAliasTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
2018-07-03 14:39:52 +02:00
|
|
|
return CI;
|
2010-12-26 23:49:25 +01:00
|
|
|
}
|
|
|
|
|
2021-07-01 01:49:48 +02:00
|
|
|
/// Emit a call to the llvm.memcpy.inline intrinsic.
///
/// Dst/Src are bitcast to i8* as required by the intrinsic; the optional
/// alignments are applied to the call's pointer arguments, and any supplied
/// TBAA / TBAA-struct / alias-scope / noalias metadata nodes are attached
/// to the resulting call instruction, which is returned.
CallInst *IRBuilderBase::CreateMemCpyInline(
    Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
    Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  // The intrinsic expects raw i8 pointers for both memory operands.
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  // Overloaded types and call operands for llvm.memcpy.inline.
  Type *OverloadTys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Value *CallOps[] = {Dst, Src, Size, getInt1(IsVolatile)};

  Module *M = BB->getParent()->getParent();
  Function *Decl =
      Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, OverloadTys);

  CallInst *CI = createCallHelper(Decl, CallOps, this);
  auto *MCI = cast<MemCpyInlineInst>(CI);

  // Apply explicit alignments to the dest/source pointer args if requested.
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Attach whichever metadata nodes the caller provided.
  if (TBAATag)
    MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  if (TBAAStructTag)
    MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
  if (ScopeTag)
    MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
  if (NoAliasTag)
    MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
|
|
|
|
|
2017-06-16 16:43:59 +02:00
|
|
|
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
|
[Alignment][NFC] Use Align with CreateElementUnorderedAtomicMemCpy
Summary:
This is patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet, nicolasvasilache
Subscribers: hiraditya, jfb, mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, csigg, arpith-jacob, mgester, lucyrfox, herhut, liufengdb, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73041
2020-01-20 14:47:28 +01:00
|
|
|
Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
|
2017-11-10 20:38:12 +01:00
|
|
|
uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
|
|
|
|
MDNode *ScopeTag, MDNode *NoAliasTag) {
|
|
|
|
assert(DstAlign >= ElementSize &&
|
|
|
|
"Pointer alignment must be at least element size");
|
|
|
|
assert(SrcAlign >= ElementSize &&
|
|
|
|
"Pointer alignment must be at least element size");
|
2017-06-06 18:45:25 +02:00
|
|
|
Dst = getCastedInt8PtrValue(Dst);
|
|
|
|
Src = getCastedInt8PtrValue(Src);
|
|
|
|
|
2017-06-16 16:43:59 +02:00
|
|
|
Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
|
|
|
|
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
|
2017-06-06 18:45:25 +02:00
|
|
|
Module *M = BB->getParent()->getParent();
|
2019-02-01 21:43:25 +01:00
|
|
|
Function *TheFn = Intrinsic::getDeclaration(
|
2017-06-16 16:43:59 +02:00
|
|
|
M, Intrinsic::memcpy_element_unordered_atomic, Tys);
|
2017-06-06 18:45:25 +02:00
|
|
|
|
|
|
|
CallInst *CI = createCallHelper(TheFn, Ops, this);
|
|
|
|
|
2017-11-10 20:38:12 +01:00
|
|
|
// Set the alignment of the pointer args.
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
auto *AMCI = cast<AtomicMemCpyInst>(CI);
|
|
|
|
AMCI->setDestAlignment(DstAlign);
|
|
|
|
AMCI->setSourceAlignment(SrcAlign);
|
2017-11-10 20:38:12 +01:00
|
|
|
|
2017-06-06 18:45:25 +02:00
|
|
|
// Set the TBAA info if present.
|
|
|
|
if (TBAATag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
|
|
|
|
|
|
|
|
// Set the TBAA Struct info if present.
|
|
|
|
if (TBAAStructTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
|
|
|
|
|
|
|
|
if (ScopeTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
|
|
|
|
|
|
|
|
if (NoAliasTag)
|
|
|
|
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
|
|
|
|
|
|
|
|
return CI;
|
|
|
|
}
|
|
|
|
|
2019-12-12 15:32:19 +01:00
|
|
|
/// Create a call to the llvm.memmove intrinsic.
///
/// \p Dst       - destination pointer
/// \p DstAlign  - optional known alignment of the destination
/// \p Src       - source pointer
/// \p SrcAlign  - optional known alignment of the source
/// \p Size      - number of bytes to move
/// \p isVolatile - whether the transfer is volatile
/// \p TBAATag / \p ScopeTag / \p NoAliasTag - optional aliasing metadata
///              attached to the generated call when non-null
CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  // The intrinsic is declared on i8* operands; bitcast the pointers if needed.
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  // memmove is overloaded on both pointer types and the size type.
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Alignments are recorded on the call's pointer arguments (as parameter
  // attributes) only when the caller actually knows them.
  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
|
2011-05-22 01:14:36 +02:00
|
|
|
|
2018-05-30 22:02:56 +02:00
|
|
|
/// Create a call to the llvm.memmove.element.unordered.atomic intrinsic.
///
/// \p Dst / \p DstAlign - destination pointer and its (required) alignment
/// \p Src / \p SrcAlign - source pointer and its (required) alignment
/// \p Size        - total number of bytes to move
/// \p ElementSize - size in bytes of each atomically-copied element; both
///                  alignments must be at least this large
/// \p TBAATag / \p TBAAStructTag / \p ScopeTag / \p NoAliasTag - optional
///                  aliasing metadata attached to the call when non-null
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  // The intrinsic is declared on i8* operands; bitcast the pointers if needed.
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
|
|
|
|
|
2017-05-09 12:43:25 +02:00
|
|
|
/// Shared helper: build a call to the unary vector-reduction intrinsic \p ID
/// applied to the vector operand \p Src, at \p Builder's insertion point.
static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                       Value *Src) {
  // These reduction intrinsics are overloaded only on the vector operand type.
  Type *OverloadTys[] = {Src->getType()};
  Module *Mod = Builder->GetInsertBlock()->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(Mod, ID, OverloadTys);
  Value *Args[] = {Src};
  return createCallHelper(Decl, Args, Builder);
}
|
|
|
|
|
|
|
|
/// Build a call to llvm.vector.reduce.fadd, folding the vector \p Src into
/// the scalar start value \p Acc.
CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *Mod = GetInsertBlock()->getParent()->getParent();
  // Overloaded on the vector operand type only.
  Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::vector_reduce_fadd, {Src->getType()});
  Value *Args[] = {Acc, Src};
  return createCallHelper(Decl, Args, this);
}
|
|
|
|
|
|
|
|
/// Build a call to llvm.vector.reduce.fmul, folding the vector \p Src into
/// the scalar start value \p Acc.
CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *Mod = GetInsertBlock()->getParent()->getParent();
  // Overloaded on the vector operand type only.
  Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::vector_reduce_fmul, {Src->getType()});
  Value *Args[] = {Acc, Src};
  return createCallHelper(Decl, Args, this);
}
|
|
|
|
|
|
|
|
/// Build an integer add reduction (llvm.vector.reduce.add) of \p Src.
CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_add;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build an integer multiply reduction (llvm.vector.reduce.mul) of \p Src.
CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_mul;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build a bitwise AND reduction (llvm.vector.reduce.and) of \p Src.
CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_and;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build a bitwise OR reduction (llvm.vector.reduce.or) of \p Src.
CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_or;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build a bitwise XOR reduction (llvm.vector.reduce.xor) of \p Src.
CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_xor;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build an integer max reduction of \p Src, using the signed
/// (llvm.vector.reduce.smax) or unsigned (llvm.vector.reduce.umax)
/// variant as selected by \p IsSigned.
CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  Intrinsic::ID ReduceID = Intrinsic::vector_reduce_umax;
  if (IsSigned)
    ReduceID = Intrinsic::vector_reduce_smax;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
|
|
|
/// Build an integer min reduction of \p Src, using the signed
/// (llvm.vector.reduce.smin) or unsigned (llvm.vector.reduce.umin)
/// variant as selected by \p IsSigned.
CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  Intrinsic::ID ReduceID = Intrinsic::vector_reduce_umin;
  if (IsSigned)
    ReduceID = Intrinsic::vector_reduce_smin;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
2020-12-30 15:11:10 +01:00
|
|
|
/// Build a floating-point max reduction (llvm.vector.reduce.fmax) of \p Src.
CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_fmax;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
2020-12-30 15:11:10 +01:00
|
|
|
/// Build a floating-point min reduction (llvm.vector.reduce.fmin) of \p Src.
CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  const Intrinsic::ID ReduceID = Intrinsic::vector_reduce_fmin;
  return getReductionIntrinsic(this, ReduceID, Src);
}
|
|
|
|
|
2011-05-22 01:14:36 +02:00
|
|
|
/// Create a call to llvm.lifetime.start marking the object at \p Ptr live.
/// When \p Size is null the size is recorded as unknown (i64 -1); otherwise
/// it must be an i64 constant.
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (Size)
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  else
    Size = getInt64(-1); // -1 encodes "unknown object size".
  Value *Args[] = {Size, Ptr};
  Module *Mod = BB->getParent()->getParent();
  Function *Fn = Intrinsic::getDeclaration(Mod, Intrinsic::lifetime_start,
                                           {Ptr->getType()});
  return createCallHelper(Fn, Args, this);
}
|
|
|
|
|
|
|
|
/// Create a call to llvm.lifetime.end marking the object at \p Ptr dead.
/// When \p Size is null the size is recorded as unknown (i64 -1); otherwise
/// it must be an i64 constant.
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (Size)
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  else
    Size = getInt64(-1); // -1 encodes "unknown object size".
  Value *Args[] = {Size, Ptr};
  Module *Mod = BB->getParent()->getParent();
  Function *Fn = Intrinsic::getDeclaration(Mod, Intrinsic::lifetime_end,
                                           {Ptr->getType()});
  return createCallHelper(Fn, Args, this);
}
|
2014-10-16 01:44:22 +02:00
|
|
|
|
2016-07-22 22:57:23 +02:00
|
|
|
/// Create a call to llvm.invariant.start for the memory object at \p Ptr.
/// When \p Size is null the size is recorded as unknown (i64 -1); otherwise
/// it must be an i64 constant. Returns the generated call, which callers may
/// later pass to invariant.end.
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  // The intrinsic is declared on i8*; bitcast the pointer if needed.
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1); // -1 encodes "unknown object size".
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}
|
|
|
|
|
2020-09-12 13:36:45 +02:00
|
|
|
/// Emit an llvm.assume call stating that the i1 value \p Cond holds,
/// attaching the given operand bundles to the call.
CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  Value *Ops[] = {Cond};
  return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
}
|
|
|
|
|
2021-01-16 09:14:18 +01:00
|
|
|
/// Emit a call to llvm.experimental.noalias.scope.decl with \p Scope as its
/// single operand.
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Function *Decl = Intrinsic::getDeclaration(
      BB->getModule(), Intrinsic::experimental_noalias_scope_decl, {});
  return createCallHelper(Decl, {Scope}, this);
}
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Create a call to a Masked Load intrinsic.
|
2021-07-03 14:57:41 +02:00
|
|
|
/// \p Ty - vector type to load
|
2020-01-21 11:21:31 +01:00
|
|
|
/// \p Ptr - base pointer for the load
|
|
|
|
/// \p Alignment - alignment of the source location
|
|
|
|
/// \p Mask - vector of booleans which indicates what vector lanes should
|
|
|
|
/// be accessed in memory
|
|
|
|
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
|
|
|
|
/// of the result
|
|
|
|
/// \p Name - name of the result variable
|
2021-07-03 14:57:41 +02:00
|
|
|
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
|
2014-12-30 15:28:14 +01:00
|
|
|
Value *Mask, Value *PassThru,
|
|
|
|
const Twine &Name) {
|
2018-03-30 02:47:31 +02:00
|
|
|
auto *PtrTy = cast<PointerType>(Ptr->getType());
|
2021-07-03 14:57:41 +02:00
|
|
|
assert(Ty->isVectorTy() && "Type should be vector");
|
|
|
|
assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
|
2017-07-31 15:21:42 +02:00
|
|
|
assert(Mask && "Mask should not be all-ones (null)");
|
2014-12-30 15:28:14 +01:00
|
|
|
if (!PassThru)
|
2021-07-03 14:57:41 +02:00
|
|
|
PassThru = UndefValue::get(Ty);
|
|
|
|
Type *OverloadedTypes[] = { Ty, PtrTy };
|
2020-01-21 11:21:31 +01:00
|
|
|
Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
|
2016-06-28 20:27:25 +02:00
|
|
|
return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
|
|
|
|
OverloadedTypes, Name);
|
2014-12-04 10:40:44 +01:00
|
|
|
}
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Create a call to a Masked Store intrinsic.
|
2020-01-21 16:13:04 +01:00
|
|
|
/// \p Val - data to be stored,
|
|
|
|
/// \p Ptr - base pointer for the store
|
|
|
|
/// \p Alignment - alignment of the destination location
|
|
|
|
/// \p Mask - vector of booleans which indicates what vector lanes should
|
|
|
|
/// be accessed in memory
|
2014-12-30 15:28:14 +01:00
|
|
|
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
|
2020-01-21 16:13:04 +01:00
|
|
|
Align Alignment, Value *Mask) {
|
2018-03-30 02:47:31 +02:00
|
|
|
auto *PtrTy = cast<PointerType>(Ptr->getType());
|
2021-07-03 12:44:21 +02:00
|
|
|
Type *DataTy = Val->getType();
|
|
|
|
assert(DataTy->isVectorTy() && "Val should be a vector");
|
|
|
|
assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
|
2017-07-31 15:21:42 +02:00
|
|
|
assert(Mask && "Mask should not be all-ones (null)");
|
2016-06-28 20:27:25 +02:00
|
|
|
Type *OverloadedTypes[] = { DataTy, PtrTy };
|
2020-01-21 16:13:04 +01:00
|
|
|
Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
|
2016-06-28 20:27:25 +02:00
|
|
|
return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
|
2014-12-04 10:40:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Create a call to a Masked intrinsic, with given intrinsic Id,
|
2016-06-28 20:27:25 +02:00
|
|
|
/// an array of operands - Ops, and an array of overloaded types -
|
|
|
|
/// OverloadedTypes.
|
2015-05-20 19:16:39 +02:00
|
|
|
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
|
2014-12-04 10:40:44 +01:00
|
|
|
ArrayRef<Value *> Ops,
|
2016-06-28 20:27:25 +02:00
|
|
|
ArrayRef<Type *> OverloadedTypes,
|
2014-12-30 15:28:14 +01:00
|
|
|
const Twine &Name) {
|
2014-12-04 10:40:44 +01:00
|
|
|
Module *M = BB->getParent()->getParent();
|
2019-02-01 21:43:25 +01:00
|
|
|
Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
|
2014-12-30 15:28:14 +01:00
|
|
|
return createCallHelper(TheFn, Ops, this, Name);
|
2014-12-04 10:40:44 +01:00
|
|
|
}
|
2014-12-30 06:55:58 +01:00
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Create a call to a Masked Gather intrinsic.
|
2021-07-03 14:57:41 +02:00
|
|
|
/// \p Ty - vector type to gather
|
2016-02-17 20:23:04 +01:00
|
|
|
/// \p Ptrs - vector of pointers for loading
|
|
|
|
/// \p Align - alignment for one element
|
|
|
|
/// \p Mask - vector of booleans which indicates what vector lanes should
|
|
|
|
/// be accessed in memory
|
|
|
|
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
|
|
|
|
/// of the result
|
|
|
|
/// \p Name - name of the result variable
|
2021-07-03 14:57:41 +02:00
|
|
|
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
|
|
|
|
Align Alignment, Value *Mask,
|
|
|
|
Value *PassThru,
|
2020-01-24 17:40:17 +01:00
|
|
|
const Twine &Name) {
|
2021-07-03 14:57:41 +02:00
|
|
|
auto *VecTy = cast<VectorType>(Ty);
|
|
|
|
ElementCount NumElts = VecTy->getElementCount();
|
2021-01-22 17:53:21 +01:00
|
|
|
auto *PtrsTy = cast<VectorType>(Ptrs->getType());
|
2021-07-03 14:57:41 +02:00
|
|
|
assert(cast<PointerType>(PtrsTy->getElementType())
|
|
|
|
->isOpaqueOrPointeeTypeMatches(
|
|
|
|
cast<VectorType>(Ty)->getElementType()) &&
|
|
|
|
"Element type mismatch");
|
|
|
|
assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
|
2016-02-17 20:23:04 +01:00
|
|
|
|
|
|
|
if (!Mask)
|
2020-06-03 22:35:41 +02:00
|
|
|
Mask = Constant::getAllOnesValue(
|
2021-01-22 17:53:21 +01:00
|
|
|
VectorType::get(Type::getInt1Ty(Context), NumElts));
|
2016-02-17 20:23:04 +01:00
|
|
|
|
2017-05-19 12:40:18 +02:00
|
|
|
if (!PassThru)
|
2021-07-03 14:57:41 +02:00
|
|
|
PassThru = UndefValue::get(Ty);
|
2017-05-19 12:40:18 +02:00
|
|
|
|
2021-07-03 14:57:41 +02:00
|
|
|
Type *OverloadedTypes[] = {Ty, PtrsTy};
|
2020-01-24 17:40:17 +01:00
|
|
|
Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
|
2016-02-17 20:23:04 +01:00
|
|
|
|
|
|
|
// We specify only one type when we create this intrinsic. Types of other
|
|
|
|
// arguments are derived from this type.
|
2017-05-03 14:28:54 +02:00
|
|
|
return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
|
|
|
|
Name);
|
2016-02-17 20:23:04 +01:00
|
|
|
}
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  // Element count is taken from the pointer vector; the debug check below
  // verifies the data vector agrees.
  ElementCount NumElts = PtrsTy->getElementCount();

#ifndef NDEBUG
  // Debug-only consistency check: same lane count and compatible pointee /
  // data element types.
  auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getElementCount() &&
         PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
         "Incompatible pointer and data types");
#endif

  // An omitted mask scatters every lane.
  if (!Mask)
    Mask = Constant::getAllOnesValue(
        VectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
|
|
|
|
|
2020-06-05 00:22:07 +02:00
|
|
|
/// Build the fixed, positional argument list of a gc.statepoint call:
/// <id>, <num patch bytes>, <callee>, <#call args>, <flags>, call args...,
/// then two trailing zero counts for the legacy transition/deopt sections.
/// The order of push_backs below defines the intrinsic's ABI — do not reorder.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
|
|
|
|
|
2020-06-05 00:22:07 +02:00
|
|
|
/// Package the optional statepoint argument sections into operand bundles:
/// "deopt", "gc-transition" and "gc-live". A section that is absent (None for
/// the optionals, empty for GCArgs) produces no bundle at all rather than an
/// empty bundle — presence of the bundle itself is meaningful.
template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}
|
|
|
|
|
2015-10-07 21:52:12 +02:00
|
|
|
/// Shared implementation behind all CreateGCStatepointCall overloads: resolves
/// the gc.statepoint intrinsic declaration (overloaded on the callee's
/// function-pointer type), builds the positional argument list, and emits the
/// call with the deopt / gc-transition / gc-live operand bundles attached.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
    Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                              ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}
|
|
|
|
|
2015-05-13 01:52:24 +02:00
|
|
|
/// Convenience overload: statepoint call with default flags and no
/// gc-transition arguments; forwards to CreateGCStatepointCallCommon.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}
|
|
|
|
|
2015-10-07 21:52:12 +02:00
|
|
|
/// Fully-general overload: caller supplies flags plus Use-based transition and
/// deopt argument ranges; forwards to CreateGCStatepointCallCommon.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
|
|
|
|
|
|
|
|
/// Overload taking the call arguments as a Use range (e.g. when rewriting an
/// existing call site); default flags, no transition args.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}
|
|
|
|
|
|
|
|
/// Invoke-flavoured counterpart of CreateGCStatepointCallCommon: same
/// statepoint argument/bundle construction, but emits an InvokeInst with the
/// given normal and unwind destinations.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);

  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}
|
|
|
|
|
2015-10-07 21:52:12 +02:00
|
|
|
/// Convenience overload: statepoint invoke with default flags and no
/// gc-transition arguments; forwards to CreateGCStatepointInvokeCommon.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}
|
|
|
|
|
|
|
|
/// Fully-general overload: caller supplies flags plus Use-based transition and
/// deopt argument ranges; forwards to CreateGCStatepointInvokeCommon.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
|
|
|
|
|
2015-05-07 01:53:09 +02:00
|
|
|
/// Overload taking the invoke arguments as a Use range (e.g. when rewriting an
/// existing invoke site); default flags, no transition args.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}
|
|
|
|
|
2014-12-30 06:55:58 +01:00
|
|
|
/// Emit llvm.experimental.gc.result, projecting the underlying call's return
/// value out of \p Statepoint as type \p ResultType.
CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType,
                                        const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  // The intrinsic is overloaded on the projected result type.
  Type *Types[] = {ResultType};
  Function *FnGCResult =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_result, Types);
  Value *Args[] = {Statepoint};
  return createCallHelper(FnGCResult, Args, this, Name);
}
|
|
|
|
|
|
|
|
/// Emit llvm.experimental.gc.relocate for \p Statepoint. \p BaseOffset and
/// \p DerivedOffset index the base and derived pointers being relocated;
/// \p ResultType is the relocated pointer's type.
CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset,
                                          int DerivedOffset,
                                          Type *ResultType,
                                          const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  // The intrinsic is overloaded on the relocated pointer type.
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}
|
2017-02-28 00:08:49 +01:00
|
|
|
|
2021-05-27 04:01:55 +02:00
|
|
|
/// Emit llvm.experimental.gc.get.pointer.base for \p DerivedPtr. The
/// intrinsic is overloaded on both its return and argument type, which are
/// the same pointer type here.
CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCFindBase = Intrinsic::getDeclaration(
      BB->getParent()->getParent(), Intrinsic::experimental_gc_get_pointer_base,
      {PtrTy, PtrTy});
  return createCallHelper(FnGCFindBase, {DerivedPtr}, this, Name);
}
|
|
|
|
|
|
|
|
/// Emit llvm.experimental.gc.get.pointer.offset for \p DerivedPtr. The
/// intrinsic is overloaded only on the argument pointer type.
CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  Function *FnGCGetOffset = Intrinsic::getDeclaration(
      BB->getParent()->getParent(),
      Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
  return createCallHelper(FnGCGetOffset, {DerivedPtr}, this, Name);
}
|
|
|
|
|
2018-10-08 12:32:33 +02:00
|
|
|
/// Emit a call to intrinsic \p ID overloaded on \p V's type, optionally
/// copying fast-math flags from \p FMFSource onto the call.
CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Function *Fn =
      Intrinsic::getDeclaration(BB->getModule(), ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}
|
2018-02-23 22:16:12 +01:00
|
|
|
|
2018-10-08 12:32:33 +02:00
|
|
|
/// Emit a call to the binary intrinsic \p ID overloaded on \p LHS's type,
/// optionally copying fast-math flags from \p FMFSource onto the call.
CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Function *Fn =
      Intrinsic::getDeclaration(BB->getModule(), ID, {LHS->getType()});
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}
|
|
|
|
|
2018-02-23 22:16:12 +01:00
|
|
|
/// Emit a call to intrinsic \p ID with explicit overload types \p Types and
/// operands \p Args, optionally copying fast-math flags from \p FMFSource.
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Function *Fn = Intrinsic::getDeclaration(BB->getModule(), ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}
|
Reapply "[IRBuilder] Virtualize IRBuilder"
Relative to the original commit, this fixes some warnings,
and is based on the deletion of the IRBuilder copy constructor
in D74693. The automatic copy constructor would no longer be
safe.
-----
Related llvm-dev thread:
http://lists.llvm.org/pipermail/llvm-dev/2020-February/138951.html
This patch moves the IRBuilder from templating over the constant
folder and inserter towards making both of these virtual.
There are a couple of motivations for this:
1. It's not possible to share code between use-sites that use
different IRBuilder folders/inserters (short of templating the code
and moving it into headers).
2. Methods currently defined on IRBuilderBase (which is not templated)
do not use the custom inserter, resulting in subtle bugs (e.g.
incorrect InstCombine worklist management). It would be possible to
move those into the templated IRBuilder, but...
3. The vast majority of the IRBuilder implementation has to live
in the header, because it depends on the template arguments.
4. We have many unnecessary dependencies on IRBuilder.h,
because it is not easy to forward-declare. (Significant parts of
the backend depend on it via TargetLowering.h, for example.)
This patch addresses the issue by making the following changes:
* IRBuilderDefaultInserter::InsertHelper becomes virtual.
IRBuilderBase accepts a reference to it.
* IRBuilderFolder is introduced as a virtual base class. It is
implemented by ConstantFolder (default), NoFolder and TargetFolder.
IRBuilderBase has a reference to this as well.
* All the logic is moved from IRBuilder to IRBuilderBase. This means
that methods can in the future replace their IRBuilder<> & uses
(or other specific IRBuilder types) with IRBuilderBase & and thus
be usable with different IRBuilders.
* The IRBuilder class is now a thin wrapper around IRBuilderBase.
Essentially it only stores the folder and inserter and takes care
of constructing the base builder.
What this patch doesn't do, but should be simple followups after this change:
* Fixing use of the inserter for creation methods originally defined
on IRBuilderBase.
* Replacing IRBuilder<> uses in arguments with IRBuilderBase, where useful.
* Moving code from the IRBuilder header to the source file.
From the user perspective, these changes should be mostly transparent:
The only thing that consumers using a custom inserted may need to do is
inherit from IRBuilderDefaultInserter publicly and mark their InsertHelper
as public.
Differential Revision: https://reviews.llvm.org/D73835
2020-02-16 17:10:09 +01:00
|
|
|
|
2020-04-02 21:35:24 +02:00
|
|
|
/// Emit a constrained-FP binary intrinsic \p ID over \p L and \p R, appending
/// the rounding-mode and exception-behavior metadata operands that every
/// constrained binop takes, then tagging the call with the strictfp attribute
/// and fast-math flags.
CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Fast-math flags come from FMFSource when given, else the builder default.
  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
|
|
|
|
|
|
|
|
/// Dispatch an opcode-plus-operand-list to the matching builder entry point:
/// unary opcodes go to CreateUnOp, binary opcodes to CreateBinOp. Any other
/// opcode is a caller error.
Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc), Ops[0], Name,
                      FPMathTag);
  }
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc), Ops[0], Ops[1],
                       Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}
|
|
|
|
|
|
|
|
/// Emit a constrained-FP cast intrinsic \p ID converting \p V to \p DestTy.
/// Whether the intrinsic takes a rounding-mode operand is looked up from
/// ConstrainedOps.def; the exception-behavior operand is always appended.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Fast-math flags come from FMFSource when given, else the builder default.
  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  // Table-driven lookup: ConstrainedOps.def records, per intrinsic, whether a
  // rounding-mode operand is part of its signature.
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  // Only FP-math-operator casts carry fast-math flags / !fpmath metadata.
  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
|
|
|
|
|
|
|
|
/// Common implementation for FP comparisons. In constrained-FP mode the
/// compare must be emitted as a constrained intrinsic (quiet or signaling per
/// \p IsSignaling); otherwise constant operands are folded and the general
/// case emits an FCmpInst carrying the builder's fast-math state.
Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  // Constant operands: let the folder produce a constant result.
  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}
|
|
|
|
|
|
|
|
/// Emit a constrained-FP compare intrinsic \p ID for predicate \p P over
/// \p L and \p R. The predicate and exception behavior are passed to the
/// intrinsic as metadata operands, and the call is tagged strictfp.
CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
|
|
|
|
|
|
|
|
/// Emit a call to an already-declared constrained-FP intrinsic \p Callee,
/// appending the rounding-mode operand when ConstrainedOps.def says the
/// intrinsic takes one, then the exception-behavior operand, and tagging the
/// call strictfp.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  // Table-driven lookup of whether this intrinsic has a rounding operand.
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
|
|
|
|
|
|
|
|
/// Create a select of \p True / \p False on condition \p C. All-constant
/// operands are folded; otherwise a SelectInst is created, optionally copying
/// branch-weight and unpredictability metadata from \p MDFrom, and FP selects
/// pick up the builder's fast-math flags.
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *CC = dyn_cast<Constant>(C))
    if (auto *TC = dyn_cast<Constant>(True))
      if (auto *FC = dyn_cast<Constant>(False))
        return Insert(Folder.CreateSelect(CC, TC, FC), Name);

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    // Transfer profile-derived hints from the instruction we are replacing.
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}
|
|
|
|
|
|
|
|
/// Compute the element-count difference between two pointers of the same
/// type: (ptrtoint(LHS) - ptrtoint(RHS)) exact-sdiv sizeof(element).
Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  auto *ArgType = cast<PointerType>(LHS->getType());
  Type *IntTy = Type::getInt64Ty(Context);
  Value *LHSInt = CreatePtrToInt(LHS, IntTy);
  Value *RHSInt = CreatePtrToInt(RHS, IntTy);
  Value *ByteDiff = CreateSub(LHSInt, RHSInt);
  // Exact division: a pointer difference is always a multiple of the
  // element size.
  return CreateExactSDiv(ByteDiff,
                         ConstantExpr::getSizeOf(ArgType->getElementType()),
                         Name);
}
|
|
|
|
|
|
|
|
/// Emit llvm.launder.invariant.group on \p Ptr. The intrinsic is declared on
/// i8* in the pointer's address space, so non-i8* inputs are bitcast in and
/// the result is bitcast back to the original type.
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  // Sanity-check the declaration's signature matches what we just built.
  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  // Restore the caller's original pointer type if we casted above.
  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}
|
|
|
|
|
|
|
|
/// Wrap \p Ptr in a call to llvm.strip.invariant.group, bitcasting to and
/// from i8* (in Ptr's address space) around the call when needed.
Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *OrigTy = Ptr->getType();
  auto *I8PtrTy = getInt8PtrTy(OrigTy->getPointerAddressSpace());
  Value *Arg = (OrigTy == I8PtrTy) ? Ptr : CreateBitCast(Ptr, I8PtrTy);

  Module *M = BB->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {I8PtrTy});

  assert(Decl->getReturnType() == I8PtrTy &&
         Decl->getFunctionType()->getParamType(0) == I8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Call = CreateCall(Decl, {Arg});

  // Restore the caller's pointer type if we had to cast for the intrinsic.
  if (OrigTy != I8PtrTy)
    return CreateBitCast(Call, OrigTy);
  return Call;
}
|
|
|
|
|
2021-01-19 12:30:50 +01:00
|
|
|
/// Reverse the lanes of vector \p V. Scalable vectors go through the
/// experimental.vector.reverse intrinsic; fixed vectors use a shufflevector
/// with a descending mask.
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *VecTy = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(VecTy)) {
    // The lane count is unknown at compile time, so no shuffle mask can be
    // written out; defer to the intrinsic instead.
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_reverse, VecTy);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Fixed-width vector: shuffle with the mask [N-1, N-2, ..., 0].
  int NumElts = VecTy->getElementCount().getKnownMinValue();
  SmallVector<int, 8> Mask;
  for (int Lane = NumElts - 1; Lane >= 0; --Lane)
    Mask.push_back(Lane);
  return CreateShuffleVector(V, Mask, Name);
}
|
|
|
|
|
2021-05-06 11:50:51 +02:00
|
|
|
/// Concatenate \p V1 and \p V2 and extract a vector-width window starting at
/// signed index \p Imm (a negative Imm counts back from the end of V1).
/// Scalable vectors use the experimental.vector.splice intrinsic; fixed
/// vectors lower to a shufflevector.
Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  // Valid range is -NumElts <= Imm < NumElts. The previous form of this
  // assert joined the two bounds with '||', which is satisfied by every Imm
  // (one side always holds), so the check could never fire.
  assert(-Imm <= (int64_t)NumElts && Imm < (int64_t)NumElts &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector: a rotated identity mask.
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  // Pass Name through; it was previously dropped on the fixed-vector path.
  return CreateShuffleVector(V1, V2, Mask, Name);
}
|
|
|
|
|
2020-04-02 21:35:24 +02:00
|
|
|
/// Splat scalar \p V across a fixed-width vector of \p NumElts lanes.
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  // Delegate to the ElementCount overload with a fixed (non-scalable) count.
  return CreateVectorSplat(ElementCount::getFixed(NumElts), V, Name);
}
|
|
|
|
|
|
|
|
/// Splat scalar \p V across a vector with element count \p EC using the
/// classic insertelement + zero-mask shufflevector pattern.
Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // Insert the scalar into lane 0 of a poison vector so it can be shuffled.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  Value *Splat = CreateInsertElement(Poison, V,
                                     ConstantInt::get(getInt32Ty(), 0),
                                     Name + ".splatinsert");

  // Broadcast lane 0 to every lane with an all-zeros shuffle mask.
  SmallVector<int, 16> ZeroMask(EC.getKnownMinValue(), 0);
  return CreateShuffleVector(Splat, ZeroMask, Name + ".splat");
}
|
|
|
|
|
|
|
|
/// Extract \p ExtractedTy from integer \p From at byte \p Offset, honouring
/// the target's endianness: shift the wanted bytes down, then truncate.
Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  // On big-endian targets byte 0 holds the most-significant bits, so the
  // shift amount is measured from the other end of the stored value.
  uint64_t ShAmt = DL.isBigEndian()
                       ? 8 * (DL.getTypeStoreSize(IntTy) -
                              DL.getTypeStoreSize(ExtractedTy) - Offset)
                       : 8 * Offset;
  Value *V = From;
  if (ShAmt)
    V = CreateLShr(V, ShAmt, Name + ".shift");
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy)
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  return V;
}
|
|
|
|
|
|
|
|
/// Create a call to llvm.preserve.array.access.index, modelling an array
/// access with \p Dimension leading zero indices followed by \p LastIndex so
/// the access can be relocated later; optionally attaches debug-info metadata.
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  auto *BaseTy = Base->getType();

  // Derive the intrinsic's result type from the equivalent GEP:
  // `Dimension` zeros followed by the final index.
  Value *LastIdxV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> GEPIndices(Dimension, Zero);
  GEPIndices.push_back(LastIdxV);
  Type *RetTy = GetElementPtrInst::getGEPReturnType(ElTy, Base, GEPIndices);

  Module *M = BB->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {RetTy, BaseTy});

  CallInst *Call = CreateCall(Decl, {Base, getInt32(Dimension), LastIdxV});
  if (DbgInfo)
    Call->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Call;
}
|
|
|
|
|
|
|
|
/// Create a call to llvm.preserve.union.access.index for \p Base with field
/// number \p FieldIndex; the result has the same type as Base. Optionally
/// attaches debug-info metadata.
Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseTy = Base->getType();

  // Union accesses do not change the pointer type, so the intrinsic is
  // instantiated with Base's type for both result and operand.
  Module *M = BB->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseTy, BaseTy});

  CallInst *Call = CreateCall(Decl, {Base, getInt32(FieldIndex)});
  if (DbgInfo)
    Call->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Call;
}
|
|
|
|
|
|
|
|
/// Create a call to llvm.preserve.struct.access.index: a relocatable access
/// to struct member \p Index (GEP index) / \p FieldIndex (debug-info field
/// number). Optionally attaches debug-info metadata.
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  auto *BaseTy = Base->getType();

  // Derive the intrinsic's result type from the equivalent {0, Index} GEP.
  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *RetTy =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *Decl = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {RetTy, BaseTy});

  CallInst *Call =
      CreateCall(Decl, {Base, GEPIndex, getInt32(FieldIndex)});
  if (DbgInfo)
    Call->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Call;
}
|
|
|
|
|
2020-09-12 13:36:45 +02:00
|
|
|
/// Emit `llvm.assume(true) ["align"(Ptr, Align[, Offset])]` — the alignment
/// fact is carried entirely by the "align" operand bundle. \p DL is unused
/// here; it is kept for signature symmetry with the public entry points.
CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                                         Value *PtrValue,
                                                         Value *AlignValue,
                                                         Value *OffsetValue) {
  SmallVector<Value *, 4> BundleArgs{PtrValue, AlignValue};
  if (OffsetValue)
    BundleArgs.push_back(OffsetValue);
  OperandBundleDefT<Value *> AlignBundle("align", BundleArgs);
  return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignBundle});
}
|
|
|
|
|
2020-09-12 13:36:45 +02:00
|
|
|
/// Assume \p PtrValue is aligned to the compile-time constant \p Alignment
/// (in bytes), optionally offset by \p OffsetValue.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  // Materialize the alignment as an intptr-sized constant in the pointer's
  // address space, then defer to the operand-bundle helper.
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignV = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignV, OffsetValue);
}
|
|
|
|
|
2020-09-12 13:36:45 +02:00
|
|
|
/// Assume \p PtrValue is aligned to the (possibly non-constant) \p Alignment
/// value, optionally offset by \p OffsetValue. Thin forwarder to the
/// operand-bundle helper.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
|
|
|
|
|
Reapply "[IRBuilder] Virtualize IRBuilder"
Relative to the original commit, this fixes some warnings,
and is based on the deletion of the IRBuilder copy constructor
in D74693. The automatic copy constructor would no longer be
safe.
-----
Related llvm-dev thread:
http://lists.llvm.org/pipermail/llvm-dev/2020-February/138951.html
This patch moves the IRBuilder from templating over the constant
folder and inserter towards making both of these virtual.
There are a couple of motivations for this:
1. It's not possible to share code between use-sites that use
different IRBuilder folders/inserters (short of templating the code
and moving it into headers).
2. Methods currently defined on IRBuilderBase (which is not templated)
do not use the custom inserter, resulting in subtle bugs (e.g.
incorrect InstCombine worklist management). It would be possible to
move those into the templated IRBuilder, but...
3. The vast majority of the IRBuilder implementation has to live
in the header, because it depends on the template arguments.
4. We have many unnecessary dependencies on IRBuilder.h,
because it is not easy to forward-declare. (Significant parts of
the backend depend on it via TargetLowering.h, for example.)
This patch addresses the issue by making the following changes:
* IRBuilderDefaultInserter::InsertHelper becomes virtual.
IRBuilderBase accepts a reference to it.
* IRBuilderFolder is introduced as a virtual base class. It is
implemented by ConstantFolder (default), NoFolder and TargetFolder.
IRBuilderBase has a reference to this as well.
* All the logic is moved from IRBuilder to IRBuilderBase. This means
that methods can in the future replace their IRBuilder<> & uses
(or other specific IRBuilder types) with IRBuilderBase & and thus
be usable with different IRBuilders.
* The IRBuilder class is now a thin wrapper around IRBuilderBase.
Essentially it only stores the folder and inserter and takes care
of constructing the base builder.
What this patch doesn't do, but should be simple followups after this change:
* Fixing use of the inserter for creation methods originally defined
on IRBuilderBase.
* Replacing IRBuilder<> uses in arguments with IRBuilderBase, where useful.
* Moving code from the IRBuilder header to the source file.
From the user perspective, these changes should be mostly transparent:
The only thing that consumers using a custom inserter may need to do is
inherit from IRBuilderDefaultInserter publicly and mark their InsertHelper
as public.
Differential Revision: https://reviews.llvm.org/D73835
2020-02-16 17:10:09 +01:00
|
|
|
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
|
|
|
|
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
|
|
|
|
IRBuilderFolder::~IRBuilderFolder() {}
|
|
|
|
void ConstantFolder::anchor() {}
|
|
|
|
void NoFolder::anchor() {}
|