//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>

using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}
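
// Illustrative example: an old declaration such as
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is renamed to "llvm.x86.sse41.ptestc.old" and re-declared with the current
//   declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>)
// signature; call sites are rewritten against NewFn separately.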

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}
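
// Illustrative example: @llvm.x86.sse41.insertps (handled below) used to
// declare its immediate mask as i32; the replacement declaration takes i8,
// matching the instruction's 8-bit immediate, so only declarations whose
// last argument is still i32 are rewritten.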

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsics matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
  if (Name == "addcarryx.u32" || // Added in 8.0
      Name == "addcarryx.u64" || // Added in 8.0
      Name == "addcarry.u32" || // Added in 8.0
      Name == "addcarry.u64" || // Added in 8.0
      Name == "subborrow.u32" || // Added in 8.0
      Name == "subborrow.u64" || // Added in 8.0
      Name.startswith("sse2.padds.") || // Added in 8.0
      Name.startswith("sse2.psubs.") || // Added in 8.0
      Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.padds.") || // Added in 8.0
      Name.startswith("avx2.psubs.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.padds.") || // Added in 8.0
      Name.startswith("avx512.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmaddsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name == "avx512.kand.w" || // Added in 7.0
      Name == "avx512.kandn.w" || // Added in 7.0
      Name == "avx512.knot.w" || // Added in 7.0
      Name == "avx512.kor.w" || // Added in 7.0
      Name == "avx512.kxor.w" || // Added in 7.0
      Name == "avx512.kxnor.w" || // Added in 7.0
      Name == "avx512.kortestc.w" || // Added in 7.0
      Name == "avx512.kortestz.w" || // Added in 7.0
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
      Name.startswith("avx512.mask.por.") || // Added in 3.9
      Name.startswith("avx512.mask.pxor.") || // Added in 3.9
      Name.startswith("avx512.mask.and.") || // Added in 3.9
      Name.startswith("avx512.mask.andn.") || // Added in 3.9
      Name.startswith("avx512.mask.or.") || // Added in 3.9
      Name.startswith("avx512.mask.xor.") || // Added in 3.9
      Name.startswith("avx512.mask.padd.") || // Added in 4.0
      Name.startswith("avx512.mask.psub.") || // Added in 4.0
      Name.startswith("avx512.mask.pmull.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
      Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
      Name.startswith("avx512.mask.psrav") || // Added in 4.0
      Name.startswith("avx512.mask.psrlv") || // Added in 4.0
      Name.startswith("sse41.pmovsx") || // Added in 3.8
      Name.startswith("sse41.pmovzx") || // Added in 3.9
      Name.startswith("avx2.pmovsx") || // Added in 3.9
      Name.startswith("avx2.pmovzx") || // Added in 3.9
      Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
      Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
      Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
      Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
      Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
      Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.vpshld.") || // Added in 8.0
      Name.startswith("avx512.vpshrd.") || // Added in 8.0
      Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name.startswith("avx512.mask.conflict.") || // Added in 9.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name.startswith("avx512.mask.expand.b") || // Added in 9.0
      Name.startswith("avx512.mask.expand.w") || // Added in 9.0
      Name.startswith("avx512.mask.expand.d") || // Added in 9.0
      Name.startswith("avx512.mask.expand.q") || // Added in 9.0
      Name.startswith("avx512.mask.expand.p") || // Added in 9.0
      Name.startswith("avx512.mask.compress.b") || // Added in 9.0
      Name.startswith("avx512.mask.compress.w") || // Added in 9.0
      Name.startswith("avx512.mask.compress.d") || // Added in 9.0
      Name.startswith("avx512.mask.compress.q") || // Added in 9.0
      Name.startswith("avx512.mask.compress.p") || // Added in 9.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
      Name.startswith("sse2.psll.dq") || // Added in 3.7
      Name.startswith("sse2.psrl.dq") || // Added in 3.7
      Name.startswith("avx2.psll.dq") || // Added in 3.7
      Name.startswith("avx2.psrl.dq") || // Added in 3.7
      Name.startswith("avx512.psll.dq") || // Added in 3.9
      Name.startswith("avx512.psrl.dq") || // Added in 3.9
      Name == "sse41.pblendw" || // Added in 3.7
      Name.startswith("sse41.blendp") || // Added in 3.7
      Name.startswith("avx.blend.p") || // Added in 3.7
      Name == "avx2.pblendw" || // Added in 3.7
      Name.startswith("avx2.pblendd.") || // Added in 3.7
      Name.startswith("avx.vbroadcastf128") || // Added in 4.0
      Name == "avx2.vbroadcasti128" || // Added in 3.7
      Name.startswith("avx512.mask.broadcastf") || // Added in 6.0
      Name.startswith("avx512.mask.broadcasti") || // Added in 6.0
      Name == "xop.vpcmov" || // Added in 3.8
      Name == "xop.vpcmov.256" || // Added in 5.0
      Name.startswith("avx512.mask.move.s") || // Added in 4.0
      Name.startswith("avx512.cvtmask2") || // Added in 5.0
      Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
      Name.startswith("xop.vprot") || // Added in 8.0
      Name.startswith("avx512.prol") || // Added in 8.0
      Name.startswith("avx512.pror") || // Added in 8.0
      Name.startswith("avx512.mask.prorv.") || // Added in 8.0
      Name.startswith("avx512.mask.pror.") || // Added in 8.0
      Name.startswith("avx512.mask.prolv.") || // Added in 8.0
      Name.startswith("avx512.mask.prol.") || // Added in 8.0
      Name.startswith("avx512.ptestm") || // Added in 6.0
      Name.startswith("avx512.ptestnm") || // Added in 6.0
      Name.startswith("avx512.mask.pavg")) // Added in 6.0
    return true;

  return false;
}

static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {
  // Only handle intrinsics that start with "x86.".
  if (!Name.startswith("x86."))
    return false;
  // Remove "x86." prefix.
  Name = Name.substr(4);

  if (ShouldUpgradeX86Intrinsic(F, Name)) {
    NewFn = nullptr;
    return true;
  }
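  // Note: a null NewFn is the convention for "nothing to re-declare"; the
  // deprecated call itself is rewritten later in this file's call-upgrade
  // path rather than being remapped to a single new declaration here.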

  if (Name == "rdtscp") { // Added in 8.0
    // If this intrinsic has 0 operands, it's the new version.
    if (F->getFunctionType()->getNumParams() == 0)
      return false;

    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_rdtscp);
    return true;
  }

  // SSE4.1 ptest functions may have an old signature.
  if (Name.startswith("sse41.ptest")) { // Added in 3.2
    if (Name.substr(11) == "c")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
    if (Name.substr(11) == "z")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
    if (Name.substr(11) == "nzc")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
  }

  // Several blend and other instructions with masks used the wrong number of
  // bits.
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }
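  // Illustrative mapping for the selection above: a <2 x double> index
  // operand (EltSize 64, IdxSize 128) picks x86_xop_vpermil2pd, while an
  // <8 x float> operand (EltSize 32, IdxSize 256) falls through to
  // x86_xop_vpermil2ps_256.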

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
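    // Illustrative example: @llvm.arm.neon.vclz.v4i32 is re-created as
    // @llvm.ctlz.v4i32, whose type (built in `args` above) also carries the
    // trailing i1 flag operand of the generic ctlz intrinsic.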
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    static const Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
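    // Illustrative example: an old @llvm.arm.neon.vld2.v8i8 declaration is
    // re-created under the pointer-suffixed name
    // @llvm.arm.neon.vld2.v8i8.p0i8, while the vst forms are re-declared
    // through getDeclaration with the pointer and vector types in Tys.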
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    if (Name.startswith("arm.neon.vqadds.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqaddu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubs.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
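    // Illustrative example: @llvm.arm.neon.vqadds.v4i16 maps directly onto
    // the target-generic @llvm.sadd.sat.v4i16; the unsigned and subtracting
    // variants above map onto uadd/ssub/usub.sat in the same way.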
    if (Name.startswith("aarch64.neon.addp")) {
      if (F->arg_size() != 2)
        break; // Invalid IR.
      VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
      if (Ty && Ty->getElementType()->isFloatingPointTy()) {
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::aarch64_neon_faddp, Ty);
        return true;
      }
    }
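    // Illustrative example: a floating-point pairwise add such as
    // @llvm.aarch64.neon.addp.v2f32 is redirected to
    // @llvm.aarch64.neon.faddp.v2f32; the integer forms are left untouched.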
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
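  // Note on the 'c' case: only single-operand ctlz/cttz declarations are
  // stale; the current intrinsics also take an i1 flag operand, so a
  // two-operand declaration is already up to date and is not renamed.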
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
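    // A hedged historical note: the stale four-operand llvm.dbg.value form
    // carried an extra offset operand that the current three-operand form
    // dropped, which is why only arg_size() == 4 declarations are upgraded.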
    break;
  }
  case 'e': {
    SmallVector<StringRef, 2> Groups;
    static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[fi][0-9]+");
    if (R.match(Name, &Groups)) {
      Intrinsic::ID ID = Intrinsic::not_intrinsic;
      if (Groups[1] == "fadd")
        ID = Intrinsic::experimental_vector_reduce_v2_fadd;
      if (Groups[1] == "fmul")
        ID = Intrinsic::experimental_vector_reduce_v2_fmul;

      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        auto Args = F->getFunctionType()->params();
        Type *Tys[] = {F->getFunctionType()->getReturnType(), Args[1]};
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }
    }
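    // Illustrative example: a declaration such as
    // @llvm.experimental.vector.reduce.fadd.f32.v4f32 is renamed and
    // re-declared as the ".v2." variant, overloaded on the scalar result
    // type and the vector operand type collected in Tys above.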
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
        Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
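    // Illustrative example: a non-overloaded declaration of
    // @llvm.lifetime.start(i64, i8*) is renamed and re-declared under its
    // pointer-overloaded name (e.g. @llvm.lifetime.start.p0i8), which the
    // getName comparison above detects.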

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
        Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::launder_invariant_group,
                                        ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Renaming gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
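    // Illustrative example: an old @llvm.masked.gather.v2f64 declaration,
    // overloaded only on the data type, is re-declared with the pointer
    // operand type added to the mangling (e.g.
    // @llvm.masked.gather.v2f64.v2p0f64).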
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
        source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
        and those that use MemIntrinsicInst::[get|set]Alignment() to use
        getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
        MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965

    // Updating the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embedding the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // Len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
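
  // For illustration (hypothetical IR, following the commit message above):
  // the old five-argument declaration
  //   declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
  // is renamed and re-declared without the i32 alignment parameter as
  //   declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
  // with alignment now carried as `align` attributes on the pointer args.
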
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .StartsWith("atomic.load.add.f32.p", true)
                        .StartsWith("atomic.load.add.f64.p", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
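
  // Note on the Expand path above: returning true with NewFn == nullptr tells
  // the caller that UpgradeIntrinsicCall must synthesize replacement IR rather
  // than swap in another intrinsic declaration. For example (a sketch of the
  // idiom, not a quote of the expansion code), llvm.nvvm.abs.i(%x) is rebuilt
  // roughly as:
  //   %neg = sub i32 0, %x
  //   %cmp = icmp sge i32 %x, 0
  //   %abs = select i1 %cmp, i32 %x, i32 %neg
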
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

Allow prefetching from non-zero address spaces
Summary:
This is useful for targets which have prefetch instructions for non-default address spaces.
<rdar://problem/42662136>
Subscribers: nemanjai, javed.absar, hiraditya, kbarton, jkorous, dexonsmith, cfe-commits, llvm-commits, RKSimon, hfinkel, t.p.northover, craig.topper, anemet
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D65254
llvm-svn: 367032
2019-07-25 18:11:57 +02:00
  case 'p':
    if (Name == "prefetch") {
      // Handle address space overloading.
      Type *Tys[] = {F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::prefetch, Tys)) {
        rename(F);
        NewFn =
            Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
        return true;
      }
    }
    break;
  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;
  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }

  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}
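
// A minimal usage sketch (assumes a caller walking a just-loaded module M
// outside this file; the loop below is hypothetical): the usual driver is
// UpgradeCallsToIntrinsic, which calls UpgradeIntrinsicFunction, rewrites
// each call via UpgradeIntrinsicCall, and erases the old declaration.
//
//   for (Function &F : llvm::make_early_inc_range(M.functions()))
//     if (F.isDeclaration() && F.getName().startswith("llvm."))
//       UpgradeCallsToIntrinsic(&F);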

GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
                          GV->getName() == "llvm.global_dtors")) ||
      !GV->hasInitializer())
    return nullptr;
  ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
  if (!ATy)
    return nullptr;
  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
  if (!STy || STy->getNumElements() != 2)
    return nullptr;

  LLVMContext &C = GV->getContext();
  IRBuilder<> IRB(C);
  auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
                               IRB.getInt8PtrTy());
  Constant *Init = GV->getInitializer();
  unsigned N = Init->getNumOperands();
  std::vector<Constant *> NewCtors(N);
  for (unsigned i = 0; i != N; ++i) {
    auto Ctor = cast<Constant>(Init->getOperand(i));
    NewCtors[i] = ConstantStruct::get(
        EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
        Constant::getNullValue(IRB.getInt8PtrTy()));
  }
  Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);

  return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
                            NewInit, GV->getName());
}
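
// Effect, for example: a two-field @llvm.global_ctors entry such as
//   { i32 65535, void ()* @ctor }
// is rewritten into the current three-field form
//   { i32 65535, void ()* @ctor, i8* null }
// where the appended i8* is the (here absent) associated-data pointer.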

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}
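
// Worked example, derived from the loop above: for a 128-bit vector
// (NumElts == 16) and Shift == 4 the indices come out as 12,13,14,15,16..27,
// i.e. four zero bytes from Res followed by bytes 0..11 of Op -- exactly
// PSLLDQ's "shift left by four bytes, filling with zeroes".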

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}
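
// Mirror example for the right shift: with NumElts == 16 and Shift == 4 the
// indices are 4..15 (bytes 4..15 of Op) followed by 16..19 (zeroes from Res),
// matching PSRLDQ's byte shift right.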

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                     cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}
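
// E.g., derived from the code above: a 4-element op carries its mask as an
// i8, so the i8 is first bitcast to <8 x i1> and the low four bits are then
// pulled out with a <0, 1, 2, 3> shuffle to form the <4 x i1> select mask.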

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}
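
// Note the constant fast path above: when the mask operand is a literal
// all-ones value (the common unmasked case), no mask vector or select is
// emitted at all and the unmasked result is used directly.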

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the immediate
// so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}
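
// Sanity check, derived from the index loop above: for 128-bit PALIGNR with
// ShiftVal == 4 the indices are 4..19, i.e. bytes 4..15 of Op1 followed by
// bytes 0..3 of Op0 -- the instruction's concatenate-then-shift-right-by-bytes
// semantics.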

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}
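
// For instance (hedged; the exact caller strings live elsewhere in this
// file), a saturating-add intrinsic like llvm.x86.sse2.paddus.b(a, b) is
// replaced by the generic llvm.uadd.sat.v16i8(a, b), with any trailing
// passthru/mask pair folded into a select as above.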

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}
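
// The rewrite above relies on the identity rol(x, n) == fshl(x, x, n) (and
// ror(x, n) == fshr(x, x, n)): when both funnel-shift inputs are the same
// value, the bits shifted in are exactly the bits shifted out, i.e. a rotate.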

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
                                   CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  const Align Alignment =
      Aligned ? Align(cast<VectorType>(Data->getType())->getBitWidth() / 8)
              : Align(1);

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Alignment);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  Type *ValTy = Passthru->getType();
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  const Align Alignment =
      Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
              : Align(1);

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}
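
// The shl-32/ashr-32 pair above is the usual in-place sign extension of the
// low 32 bits of each i64 lane; the unsigned path masks with 0xffffffff to
// zero-extend instead. The full 64x64 multiply of the widened lanes then
// matches PMULDQ/PMULUDQ semantics.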

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}
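
// E.g., a <4 x i1> compare result is padded here to <8 x i1> by shuffling in
// lanes from the zero vector (indices >= NumElts select the second, null
// operand), so the final bitcast always yields at least an i8 mask with the
// unused high bits cleared.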

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallInst &CI, Value *&Rep) {
  Name = Name.substr(12); // Remove avx512.mask.

  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (Name.startswith("max.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("min.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pshuf.b.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmul.hr.sw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmul_hr_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulh.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulh_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulh_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulh_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulhu.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulhu_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulhu_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulhu_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddw.d.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmadd_wd;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_wd;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddw_d_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddubs.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_ub_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packsswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packssdw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packssdw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packssdw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packssdw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packuswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packuswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packuswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packuswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packusdw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse41_packusdw;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packusdw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packusdw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("vpermilvar.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx_vpermilvar_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx_vpermilvar_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_vpermilvar_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_vpermilvar_pd_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name == "cvtpd2dq.256") {
    IID = Intrinsic::x86_avx_cvt_pd2dq_256;
  } else if (Name == "cvtpd2ps.256") {
    IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
  } else if (Name == "cvttpd2dq.256") {
    IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
  } else if (Name == "cvttps2dq.128") {
    IID = Intrinsic::x86_sse2_cvttps2dq;
  } else if (Name == "cvttps2dq.256") {
    IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
  } else if (Name.startswith("permvar.")) {
    bool IsFloat = CI.getType()->isFPOrFPVectorTy();
    if (VecWidth == 256 && EltWidth == 32 && IsFloat)
      IID = Intrinsic::x86_avx2_permps;
    else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
      IID = Intrinsic::x86_avx2_permd;
    else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_df_256;
    else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_di_256;
    else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_sf_512;
    else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_si_512;
    else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_df_512;
    else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_di_512;
    else if (VecWidth == 128 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_128;
    else if (VecWidth == 256 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_256;
    else if (VecWidth == 512 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_512;
    else if (VecWidth == 128 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_128;
    else if (VecWidth == 256 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_256;
    else if (VecWidth == 512 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("dbpsadbw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_avx512_dbpsadbw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx512_dbpsadbw_256;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_dbpsadbw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmultishift.qb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_avx512_pmultishift_qb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx512_pmultishift_qb_256;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmultishift_qb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("conflict.")) {
    if (Name[9] == 'd' && VecWidth == 128)
      IID = Intrinsic::x86_avx512_conflict_d_128;
    else if (Name[9] == 'd' && VecWidth == 256)
      IID = Intrinsic::x86_avx512_conflict_d_256;
    else if (Name[9] == 'd' && VecWidth == 512)
      IID = Intrinsic::x86_avx512_conflict_d_512;
    else if (Name[9] == 'q' && VecWidth == 128)
      IID = Intrinsic::x86_avx512_conflict_q_128;
    else if (Name[9] == 'q' && VecWidth == 256)
      IID = Intrinsic::x86_avx512_conflict_q_256;
    else if (Name[9] == 'q' && VecWidth == 512)
      IID = Intrinsic::x86_avx512_conflict_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pavg.")) {
    if (Name[5] == 'b' && VecWidth == 128)
      IID = Intrinsic::x86_sse2_pavg_b;
    else if (Name[5] == 'b' && VecWidth == 256)
      IID = Intrinsic::x86_avx2_pavg_b;
    else if (Name[5] == 'b' && VecWidth == 512)
      IID = Intrinsic::x86_avx512_pavg_b_512;
    else if (Name[5] == 'w' && VecWidth == 128)
      IID = Intrinsic::x86_sse2_pavg_w;
    else if (Name[5] == 'w' && VecWidth == 256)
      IID = Intrinsic::x86_avx2_pavg_w;
    else if (Name[5] == 'w' && VecWidth == 512)
      IID = Intrinsic::x86_avx512_pavg_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else
    return false;

  SmallVector<Value *, 4> Args(CI.arg_operands().begin(),
                               CI.arg_operands().end());
  Args.pop_back();
  Args.pop_back();
  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                           Args);
  unsigned NumArgs = CI.getNumArgOperands();
  Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
                      CI.getArgOperand(NumArgs - 2));
  return true;
}
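
// The pattern above is mechanical: each listed "avx512.mask.*" name maps back
// to its pre-AVX512 unmasked intrinsic, the trailing passthru/mask operands
// are dropped from the rebuilt call, and the masking is re-expressed as a
// select. E.g. llvm.x86.avx512.mask.pshuf.b.128(a, b, passthru, mask) becomes
// select(mask, llvm.x86.ssse3.pshuf.b.128(a, b), passthru).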

/// Upgrade comment in call to inline asm that represents an objc retain
/// release marker.
void llvm::UpgradeInlineAsmString(std::string *AsmStr) {
  size_t Pos;
  if (AsmStr->find("mov\tfp") == 0 &&
      AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos &&
      (Pos = AsmStr->find("# marker")) != std::string::npos) {
    AsmStr->replace(Pos, 1, ";");
  }
  return;
}

/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
  Function *F = CI->getCalledFunction();
  LLVMContext &C = CI->getContext();
  IRBuilder<> Builder(C);
  Builder.SetInsertPoint(CI->getParent(), CI->getIterator());

  assert(F && "Intrinsic call is not direct?");

  if (!NewFn) {
    // Get the Function's name.
    StringRef Name = F->getName();

    assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
    Name = Name.substr(5);

    bool IsX86 = Name.startswith("x86.");
    if (IsX86)
      Name = Name.substr(4);
bool IsNVVM = Name.startswith("nvvm.");
|
|
|
|
if (IsNVVM)
|
|
|
|
Name = Name.substr(5);
|
2016-07-04 22:56:38 +02:00
|
|
|
|
2016-12-10 22:15:48 +01:00
|
|
|
if (IsX86 && Name.startswith("sse4a.movnt.")) {
|
|
|
|
Module *M = F->getParent();
|
|
|
|
SmallVector<Metadata *, 1> Elts;
|
|
|
|
Elts.push_back(
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
|
|
|
|
MDNode *Node = MDNode::get(C, Elts);
|
|
|
|
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
// Nontemporal (unaligned) store of the 0'th element of the float/double
|
|
|
|
// vector.
|
|
|
|
Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
|
|
|
|
PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
|
|
|
|
Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
|
|
|
|
Value *Extract =
|
|
|
|
Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
|
|
|
|
|
2020-01-23 16:18:34 +01:00
|
|
|
StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1));
|
2016-12-10 22:15:48 +01:00
|
|
|
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
|
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IsX86 && (Name.startswith("avx.movnt.") ||
|
|
|
|
Name.startswith("avx512.storent."))) {
|
|
|
|
Module *M = F->getParent();
|
|
|
|
SmallVector<Metadata *, 1> Elts;
|
|
|
|
Elts.push_back(
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
|
|
|
|
MDNode *Node = MDNode::get(C, Elts);
|
|
|
|
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
// Convert the type of the pointer to a pointer to the stored type.
|
|
|
|
Value *BC = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Arg1->getType()),
|
|
|
|
"cast");
|
|
|
|
VectorType *VTy = cast<VectorType>(Arg1->getType());
|
2020-01-23 16:18:34 +01:00
|
|
|
StoreInst *SI =
|
|
|
|
Builder.CreateAlignedStore(Arg1, BC, Align(VTy->getBitWidth() / 8));
|
2016-12-10 22:15:48 +01:00
|
|
|
SI->setMetadata(M->getMDKindID("nontemporal"), Node);
|
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IsX86 && Name == "sse2.storel.dq") {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
|
|
|
|
Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
|
|
|
|
Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
|
|
|
|
Value *BC = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Elt->getType()),
|
|
|
|
"cast");
|
2020-01-23 16:18:34 +01:00
|
|
|
Builder.CreateAlignedStore(Elt, BC, Align(1));
|
2016-12-10 22:15:48 +01:00
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IsX86 && (Name.startswith("sse.storeu.") ||
|
|
|
|
Name.startswith("sse2.storeu.") ||
|
|
|
|
Name.startswith("avx.storeu."))) {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
|
|
|
|
Arg0 = Builder.CreateBitCast(Arg0,
|
|
|
|
PointerType::getUnqual(Arg1->getType()),
|
|
|
|
"cast");
|
2020-01-23 16:18:34 +01:00
|
|
|
Builder.CreateAlignedStore(Arg1, Arg0, Align(1));
|
2016-12-10 22:15:48 +01:00
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-05-11 06:33:18 +02:00
|
|
|
if (IsX86 && Name == "avx512.mask.store.ss") {
|
|
|
|
Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
|
|
|
|
UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
Mask, false);
|
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-02-18 20:51:14 +01:00
|
|
|
if (IsX86 && (Name.startswith("avx512.mask.store"))) {
|
|
|
|
// "avx512.mask.storeu." or "avx512.mask.store."
|
|
|
|
bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
|
2016-12-10 22:15:48 +01:00
|
|
|
UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
|
2017-02-18 20:51:14 +01:00
|
|
|
CI->getArgOperand(2), Aligned);
|
2016-12-10 22:15:48 +01:00
|
|
|
|
|
|
|
// Remove intrinsic.
|
|
|
|
CI->eraseFromParent();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-02-03 07:10:55 +01:00
|
|
|
Value *Rep;
|
2016-06-16 00:01:28 +02:00
|
|
|
// Upgrade packed integer vector compare intrinsics to compare instructions.
|
2017-02-18 20:51:14 +01:00
|
|
|
if (IsX86 && (Name.startswith("sse2.pcmp") ||
|
|
|
|
Name.startswith("avx2.pcmp"))) {
|
|
|
|
// "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
|
|
|
|
bool CmpEq = Name[9] == 'e';
|
|
|
|
Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
|
|
|
|
CI->getArgOperand(0), CI->getArgOperand(1));
|
2012-02-03 07:10:55 +01:00
|
|
|
Rep = Builder.CreateSExt(Rep, CI->getType(), "");
  } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
    Type *ExtTy = Type::getInt32Ty(C);
    if (CI->getOperand(0)->getType()->isIntegerTy(8))
      ExtTy = Type::getInt64Ty(C);
    unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
                       ExtTy->getPrimitiveSizeInBits();
    Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
    Rep = Builder.CreateVectorSplat(NumElts, Rep);
  } else if (IsX86 && (Name == "sse.sqrt.ss" ||
                       Name == "sse2.sqrt.sd")) {
    Value *Vec = CI->getArgOperand(0);
    Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
    Function *Intr = Intrinsic::getDeclaration(F->getParent(),
                                               Intrinsic::sqrt,
                                               Elt0->getType());
    Elt0 = Builder.CreateCall(Intr, Elt0);
    Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
  } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
                       Name.startswith("sse2.sqrt.p") ||
                       Name.startswith("sse.sqrt.p"))) {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                       Intrinsic::sqrt,
                                                       CI->getType()),
                             {CI->getArgOperand(0)});
  } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
    if (CI->getNumArgOperands() == 4 &&
        (!isa<ConstantInt>(CI->getArgOperand(3)) ||
         cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
      Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
                                          : Intrinsic::x86_avx512_sqrt_pd_512;

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
                                                         IID), Args);
    } else {
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                         Intrinsic::sqrt,
                                                         CI->getType()),
                               {CI->getArgOperand(0)});
    }
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
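    // A rounding-mode operand of 4 means "current direction" (no static
    // rounding), which is why that case falls through to plain llvm.sqrt.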
  } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
                       Name.startswith("avx512.ptestnm"))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    Value *Mask = CI->getArgOperand(2);
    Rep = Builder.CreateAnd(Op0, Op1);
    llvm::Type *Ty = Op0->getType();
    Value *Zero = llvm::Constant::getNullValue(Ty);
    ICmpInst::Predicate Pred = Name.startswith("avx512.ptestm")
                                   ? ICmpInst::ICMP_NE
                                   : ICmpInst::ICMP_EQ;
    Rep = Builder.CreateICmp(Pred, Rep, Zero);
    Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
  } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))) {
    unsigned NumElts =
        CI->getArgOperand(1)->getType()->getVectorNumElements();
    Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
    unsigned NumElts = CI->getType()->getScalarSizeInBits();
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
    uint32_t Indices[64];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;

    // First extract half of each vector. This gives better codegen than
    // doing it in a single shuffle.
    LHS = Builder.CreateShuffleVector(LHS, LHS,
                                      makeArrayRef(Indices, NumElts / 2));
    RHS = Builder.CreateShuffleVector(RHS, RHS,
                                      makeArrayRef(Indices, NumElts / 2));
    // Concat the vectors.
    // NOTE: Operands have to be swapped to match intrinsic definition.
    Rep = Builder.CreateShuffleVector(RHS, LHS,
                                      makeArrayRef(Indices, NumElts));
    Rep = Builder.CreateBitCast(Rep, CI->getType());
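    // E.g. for a 16-bit kunpck (NumElts == 16), the low 8 mask bits of each
    // operand are kept and concatenated, with the second operand supplying
    // the low half of the result.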
  } else if (IsX86 && Name == "avx512.kand.w") {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    Rep = Builder.CreateAnd(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 && Name == "avx512.kandn.w") {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    LHS = Builder.CreateNot(LHS);
    Rep = Builder.CreateAnd(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 && Name == "avx512.kor.w") {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    Rep = Builder.CreateOr(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 && Name == "avx512.kxor.w") {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    Rep = Builder.CreateXor(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 && Name == "avx512.kxnor.w") {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    LHS = Builder.CreateNot(LHS);
    Rep = Builder.CreateXor(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 && Name == "avx512.knot.w") {
    Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Rep = Builder.CreateNot(Rep);
    Rep = Builder.CreateBitCast(Rep, CI->getType());
  } else if (IsX86 &&
             (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
    Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
    Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
    Rep = Builder.CreateOr(LHS, RHS);
    Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
    Value *C;
    if (Name[14] == 'c')
      C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
    else
      C = ConstantInt::getNullValue(Builder.getInt16Ty());
    Rep = Builder.CreateICmpEQ(Rep, C);
    Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
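    // kortestc compares the OR of the two masks against all-ones, kortestz
    // against all-zeros; either way the i1 result is zero-extended to i32.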
  } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
                       Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
                       Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
                       Name == "sse.div.ss" || Name == "sse2.div.sd")) {
    Type *I32Ty = Type::getInt32Ty(C);
    Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
                                               ConstantInt::get(I32Ty, 0));
    Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
                                               ConstantInt::get(I32Ty, 0));
    Value *EltOp;
    if (Name.contains(".add."))
      EltOp = Builder.CreateFAdd(Elt0, Elt1);
    else if (Name.contains(".sub."))
      EltOp = Builder.CreateFSub(Elt0, Elt1);
    else if (Name.contains(".mul."))
      EltOp = Builder.CreateFMul(Elt0, Elt1);
    else
      EltOp = Builder.CreateFDiv(Elt0, Elt1);
    Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
                                      ConstantInt::get(I32Ty, 0));
  } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
    // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
    bool CmpEq = Name[16] == 'e';
    Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
  } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
    Type *OpTy = CI->getArgOperand(0)->getType();
    unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
    Intrinsic::ID IID;
    switch (VecWidth) {
    default: llvm_unreachable("Unexpected intrinsic");
    case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
    case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
    case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
    }

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getOperand(0), CI->getArgOperand(1) });
    Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
    Type *OpTy = CI->getArgOperand(0)->getType();
    unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
    unsigned EltWidth = OpTy->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_fpclass_ps_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_fpclass_ps_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_fpclass_ps_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_fpclass_pd_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_fpclass_pd_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_fpclass_pd_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getOperand(0), CI->getArgOperand(1) });
    Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) {
    Type *OpTy = CI->getArgOperand(0)->getType();
    unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
    unsigned EltWidth = OpTy->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_cmp_ps_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_cmp_ps_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_cmp_ps_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_cmp_pd_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_cmp_pd_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_cmp_pd_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    SmallVector<Value *, 4> Args;
    Args.push_back(CI->getArgOperand(0));
    Args.push_back(CI->getArgOperand(1));
    Args.push_back(CI->getArgOperand(2));
    if (CI->getNumArgOperands() == 5)
      Args.push_back(CI->getArgOperand(4));

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             Args);
    Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3));
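    // Operand layout for these compares: 0 and 1 are the vectors, 2 is the
    // predicate immediate, 3 is the mask, and an optional operand 4 carries
    // the SAE control for the 512-bit forms.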
  } else if (IsX86 && Name.startswith("avx512.mask.cmp.") &&
             Name[16] != 'p') {
    // Integer compare intrinsics.
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
  } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
  } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
                       Name.startswith("avx512.cvtw2mask.") ||
                       Name.startswith("avx512.cvtd2mask.") ||
                       Name.startswith("avx512.cvtq2mask."))) {
    Value *Op = CI->getArgOperand(0);
    Value *Zero = llvm::Constant::getNullValue(Op->getType());
    Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
    Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
  } else if (IsX86 && (Name == "ssse3.pabs.b.128" ||
                       Name == "ssse3.pabs.w.128" ||
                       Name == "ssse3.pabs.d.128" ||
                       Name.startswith("avx2.pabs") ||
                       Name.startswith("avx512.mask.pabs"))) {
    Rep = upgradeAbs(Builder, *CI);
  } else if (IsX86 && (Name == "sse41.pmaxsb" ||
                       Name == "sse2.pmaxs.w" ||
                       Name == "sse41.pmaxsd" ||
                       Name.startswith("avx2.pmaxs") ||
                       Name.startswith("avx512.mask.pmaxs"))) {
    Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
  } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
                       Name == "sse41.pmaxuw" ||
                       Name == "sse41.pmaxud" ||
                       Name.startswith("avx2.pmaxu") ||
                       Name.startswith("avx512.mask.pmaxu"))) {
    Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
  } else if (IsX86 && (Name == "sse41.pminsb" ||
                       Name == "sse2.pmins.w" ||
                       Name == "sse41.pminsd" ||
                       Name.startswith("avx2.pmins") ||
                       Name.startswith("avx512.mask.pmins"))) {
    Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
  } else if (IsX86 && (Name == "sse2.pminu.b" ||
                       Name == "sse41.pminuw" ||
                       Name == "sse41.pminud" ||
                       Name.startswith("avx2.pminu") ||
                       Name.startswith("avx512.mask.pminu"))) {
    Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
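    // upgradeIntMinMax (defined earlier in this file) lowers each of these
    // to roughly an icmp with the given predicate followed by a select of
    // the two operands.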
  } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
                       Name == "avx2.pmulu.dq" ||
                       Name == "avx512.pmulu.dq.512" ||
                       Name.startswith("avx512.mask.pmulu.dq."))) {
    Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
  } else if (IsX86 && (Name == "sse41.pmuldq" ||
                       Name == "avx2.pmul.dq" ||
                       Name == "avx512.pmul.dq.512" ||
                       Name.startswith("avx512.mask.pmul.dq."))) {
    Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
  } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
                       Name == "sse2.cvtsi2sd" ||
                       Name == "sse.cvtsi642ss" ||
                       Name == "sse2.cvtsi642sd")) {
    Rep = Builder.CreateSIToFP(CI->getArgOperand(1),
                               CI->getType()->getVectorElementType());
    Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
  } else if (IsX86 && Name == "avx512.cvtusi2sd") {
    Rep = Builder.CreateUIToFP(CI->getArgOperand(1),
                               CI->getType()->getVectorElementType());
    Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
  } else if (IsX86 && Name == "sse2.cvtss2sd") {
    Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0);
    Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType());
    Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
  } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
                       Name == "sse2.cvtdq2ps" ||
                       Name == "avx.cvtdq2.pd.256" ||
                       Name == "avx.cvtdq2.ps.256" ||
                       Name.startswith("avx512.mask.cvtdq2pd.") ||
                       Name.startswith("avx512.mask.cvtudq2pd.") ||
                       Name.startswith("avx512.mask.cvtdq2ps.") ||
                       Name.startswith("avx512.mask.cvtudq2ps.") ||
                       Name.startswith("avx512.mask.cvtqq2pd.") ||
                       Name.startswith("avx512.mask.cvtuqq2pd.") ||
                       Name == "avx512.mask.cvtqq2ps.256" ||
                       Name == "avx512.mask.cvtqq2ps.512" ||
                       Name == "avx512.mask.cvtuqq2ps.256" ||
                       Name == "avx512.mask.cvtuqq2ps.512" ||
                       Name == "sse2.cvtps2pd" ||
                       Name == "avx.cvt.ps2.pd.256" ||
                       Name == "avx512.mask.cvtps2pd.128" ||
                       Name == "avx512.mask.cvtps2pd.256")) {
    Type *DstTy = CI->getType();
    Rep = CI->getArgOperand(0);
    Type *SrcTy = Rep->getType();

    unsigned NumDstElts = DstTy->getVectorNumElements();
    if (NumDstElts < SrcTy->getVectorNumElements()) {
      assert(NumDstElts == 2 && "Unexpected vector size");
      uint32_t ShuffleMask[2] = { 0, 1 };
      Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask);
    }

    bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy();
    bool IsUnsigned = (StringRef::npos != Name.find("cvtu"));
    if (IsPS2PD)
      Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
    else if (CI->getNumArgOperands() == 4 &&
             (!isa<ConstantInt>(CI->getArgOperand(3)) ||
              cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
      Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
                                     : Intrinsic::x86_avx512_sitofp_round;
      Function *F = Intrinsic::getDeclaration(CI->getModule(), IID,
                                              { DstTy, SrcTy });
      Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) });
    } else {
      Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
                       : Builder.CreateSIToFP(Rep, DstTy, "cvt");
    }

    if (CI->getNumArgOperands() >= 3)
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) {
    Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                            CI->getArgOperand(1), CI->getArgOperand(2),
                            /*Aligned*/false);
  } else if (IsX86 && (Name.startswith("avx512.mask.load."))) {
    Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                            CI->getArgOperand(1), CI->getArgOperand(2),
                            /*Aligned*/true);
  } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) {
    Type *ResultTy = CI->getType();
    Type *PtrTy = ResultTy->getVectorElementType();

    // Cast the pointer to element type.
    Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
                                       llvm::PointerType::getUnqual(PtrTy));

    Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
                                   ResultTy->getVectorNumElements());

    Function *ELd = Intrinsic::getDeclaration(F->getParent(),
                                              Intrinsic::masked_expandload,
                                              ResultTy);
    Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) });
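    // llvm.masked.expandload reads consecutive elements from memory and
    // expands them into the lanes whose mask bit is set; lanes with a clear
    // bit take their value from the passthru operand (operand 1 here).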
  } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) {
    Type *ResultTy = CI->getArgOperand(1)->getType();
    Type *PtrTy = ResultTy->getVectorElementType();

    // Cast the pointer to element type.
    Value *Ptr = Builder.CreateBitCast(CI->getOperand(0),
                                       llvm::PointerType::getUnqual(PtrTy));

    Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
                                   ResultTy->getVectorNumElements());

    Function *CSt = Intrinsic::getDeclaration(F->getParent(),
                                              Intrinsic::masked_compressstore,
                                              ResultTy);
    Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec });
  } else if (IsX86 && (Name.startswith("avx512.mask.compress.") ||
                       Name.startswith("avx512.mask.expand."))) {
    Type *ResultTy = CI->getType();

    Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
                                   ResultTy->getVectorNumElements());

    bool IsCompress = Name[12] == 'c';
    Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                   : Intrinsic::x86_avx512_mask_expand;
    Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
    Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1),
                                     MaskVec });
  } else if (IsX86 && Name.startswith("xop.vpcom")) {
    bool IsSigned;
    if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") ||
        Name.endswith("uq"))
      IsSigned = false;
    else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") ||
             Name.endswith("q"))
      IsSigned = true;
    else
      llvm_unreachable("Unknown suffix");

    unsigned Imm;
    if (CI->getNumArgOperands() == 3) {
      Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    } else {
      Name = Name.substr(9); // strip off "xop.vpcom"
      if (Name.startswith("lt"))
        Imm = 0;
      else if (Name.startswith("le"))
        Imm = 1;
      else if (Name.startswith("gt"))
        Imm = 2;
      else if (Name.startswith("ge"))
        Imm = 3;
      else if (Name.startswith("eq"))
        Imm = 4;
      else if (Name.startswith("ne"))
        Imm = 5;
      else if (Name.startswith("false"))
        Imm = 6;
      else if (Name.startswith("true"))
        Imm = 7;
      else
        llvm_unreachable("Unknown condition");
    }

    Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned);
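    // E.g. the old llvm.x86.xop.vpcomltub maps to the unsigned compare with
    // Imm == 0 ("lt"), while llvm.x86.xop.vpcomgtb is the signed compare
    // with Imm == 2.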
  } else if (IsX86 && Name.startswith("xop.vpcmov")) {
    Value *Sel = CI->getArgOperand(2);
    Value *NotSel = Builder.CreateNot(Sel);
    Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
    Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
    Rep = Builder.CreateOr(Sel0, Sel1);
  } else if (IsX86 && (Name.startswith("xop.vprot") ||
                       Name.startswith("avx512.prol") ||
                       Name.startswith("avx512.mask.prol"))) {
    Rep = upgradeX86Rotate(Builder, *CI, false);
  } else if (IsX86 && (Name.startswith("avx512.pror") ||
                       Name.startswith("avx512.mask.pror"))) {
    Rep = upgradeX86Rotate(Builder, *CI, true);
  } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
                       Name.startswith("avx512.mask.vpshld") ||
                       Name.startswith("avx512.maskz.vpshld"))) {
    bool ZeroMask = Name[11] == 'z';
    Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
  } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
                       Name.startswith("avx512.mask.vpshrd") ||
                       Name.startswith("avx512.maskz.vpshrd"))) {
    bool ZeroMask = Name[11] == 'z';
    Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
  } else if (IsX86 && Name == "sse42.crc32.64.8") {
    Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
                                                Intrinsic::x86_sse42_crc32_32_8);
    Value *Trunc0 =
        Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
    Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
    Rep = Builder.CreateZExt(Rep, CI->getType(), "");
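    // The 64-bit CRC32 with an 8-bit source only ever produces a 32-bit
    // value, so the upgrade truncates the accumulator, uses the 32-bit
    // intrinsic, and zero-extends the result back to i64.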
  } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
                       Name.startswith("avx512.vbroadcast.s"))) {
    // Replace broadcasts with a series of insertelements.
    Type *VecTy = CI->getType();
    Type *EltTy = VecTy->getVectorElementType();
    unsigned EltNum = VecTy->getVectorNumElements();
    Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
                                        EltTy->getPointerTo());
    Value *Load = Builder.CreateLoad(EltTy, Cast);
    Type *I32Ty = Type::getInt32Ty(C);
    Rep = UndefValue::get(VecTy);
    for (unsigned I = 0; I < EltNum; ++I)
      Rep = Builder.CreateInsertElement(Rep, Load,
                                        ConstantInt::get(I32Ty, I));
  } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
                       Name.startswith("sse41.pmovzx") ||
                       Name.startswith("avx2.pmovsx") ||
                       Name.startswith("avx2.pmovzx") ||
                       Name.startswith("avx512.mask.pmovsx") ||
                       Name.startswith("avx512.mask.pmovzx"))) {
    VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
    VectorType *DstTy = cast<VectorType>(CI->getType());
    unsigned NumDstElts = DstTy->getNumElements();

    // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
    SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
    for (unsigned i = 0; i != NumDstElts; ++i)
      ShuffleMask[i] = i;

    Value *SV = Builder.CreateShuffleVector(
        CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);

    bool DoSext = (StringRef::npos != Name.find("pmovsx"));
    Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
                 : Builder.CreateZExt(SV, DstTy);
    // If there are 3 arguments, it's a masked intrinsic so we need a select.
    if (CI->getNumArgOperands() == 3)
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
  } else if (Name == "avx512.mask.pmov.qd.256" ||
             Name == "avx512.mask.pmov.qd.512" ||
             Name == "avx512.mask.pmov.wb.256" ||
             Name == "avx512.mask.pmov.wb.512") {
    Type *Ty = CI->getArgOperand(1)->getType();
    Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
                       Name == "avx2.vbroadcasti128")) {
    // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
    Type *EltTy = CI->getType()->getVectorElementType();
    unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
    Type *VT = VectorType::get(EltTy, NumSrcElts);
    Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
                                          PointerType::getUnqual(VT));
    Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
    if (NumSrcElts == 2)
      Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                        { 0, 1, 0, 1 });
    else
      Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                        { 0, 1, 2, 3, 0, 1, 2, 3 });
  } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
                       Name.startswith("avx512.mask.shuf.f"))) {
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    Type *VT = CI->getType();
    unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
    unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
    unsigned ControlBitsMask = NumLanes - 1;
    unsigned NumControlBits = NumLanes / 2;
    SmallVector<uint32_t, 8> ShuffleMask(0);

    for (unsigned l = 0; l != NumLanes; ++l) {
      unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
      // We actually need the other source.
      if (l >= NumLanes / 2)
        LaneMask += NumLanes;
      for (unsigned i = 0; i != NumElementsInLane; ++i)
        ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
    }
    Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                      CI->getArgOperand(1), ShuffleMask);
    Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                        CI->getArgOperand(3));
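    // For illustration: a 512-bit shuffle has four 128-bit lanes, so two
    // control bits per result lane; result lanes 0-1 come from the first
    // source and lanes 2-3 from the second. Imm == 0 therefore yields
    // { a[0], a[0], b[0], b[0] } in 128-bit lane units.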
  } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
                       Name.startswith("avx512.mask.broadcasti"))) {
    unsigned NumSrcElts =
        CI->getArgOperand(0)->getType()->getVectorNumElements();
    unsigned NumDstElts = CI->getType()->getVectorNumElements();

    SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
    for (unsigned i = 0; i != NumDstElts; ++i)
      ShuffleMask[i] = i % NumSrcElts;

    Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                      CI->getArgOperand(0),
                                      ShuffleMask);
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
                       Name.startswith("avx2.vbroadcast") ||
                       Name.startswith("avx512.pbroadcast") ||
                       Name.startswith("avx512.mask.broadcast.s"))) {
    // Replace vp?broadcasts with a vector shuffle.
    Value *Op = CI->getArgOperand(0);
    unsigned NumElts = CI->getType()->getVectorNumElements();
    Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
    Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
                                      Constant::getNullValue(MaskTy));

    if (CI->getNumArgOperands() == 3)
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("sse2.padds.") ||
                       Name.startswith("sse2.psubs.") ||
                       Name.startswith("avx2.padds.") ||
                       Name.startswith("avx2.psubs.") ||
                       Name.startswith("avx512.padds.") ||
                       Name.startswith("avx512.psubs.") ||
                       Name.startswith("avx512.mask.padds.") ||
                       Name.startswith("avx512.mask.psubs."))) {
    bool IsAdd = Name.contains(".padds");
    Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd);
  } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
                       Name.startswith("sse2.psubus.") ||
                       Name.startswith("avx2.paddus.") ||
                       Name.startswith("avx2.psubus.") ||
                       Name.startswith("avx512.mask.paddus.") ||
                       Name.startswith("avx512.mask.psubus."))) {
    bool IsAdd = Name.contains(".paddus");
    Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd);
  } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
    Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                    CI->getArgOperand(1),
                                    CI->getArgOperand(2),
                                    CI->getArgOperand(3),
                                    CI->getArgOperand(4),
                                    false);
  } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
    Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                    CI->getArgOperand(1),
                                    CI->getArgOperand(2),
                                    CI->getArgOperand(3),
                                    CI->getArgOperand(4),
                                    true);
  } else if (IsX86 && (Name == "sse2.psll.dq" ||
                       Name == "avx2.psll.dq")) {
    // 128/256-bit shift left specified in bits.
    unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
                                     Shift / 8); // Shift is in bits.
  } else if (IsX86 && (Name == "sse2.psrl.dq" ||
                       Name == "avx2.psrl.dq")) {
    // 128/256-bit shift right specified in bits.
    unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
                                     Shift / 8); // Shift is in bits.
  } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
                       Name == "avx2.psll.dq.bs" ||
                       Name == "avx512.psll.dq.512")) {
    // 128/256/512-bit shift left specified in bytes.
    unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
  } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
                       Name == "avx2.psrl.dq.bs" ||
                       Name == "avx512.psrl.dq.512")) {
    // 128/256/512-bit shift right specified in bytes.
    unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
  } else if (IsX86 && (Name == "sse41.pblendw" ||
                       Name.startswith("sse41.blendp") ||
                       Name.startswith("avx.blend.p") ||
                       Name == "avx2.pblendw" ||
                       Name.startswith("avx2.pblendd."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = ((Imm >> (i % 8)) & 1) ? i + NumElts : i;

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
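    // For illustration: with 8 elements and Imm == 0x0F, bits 0-3 are set,
    // so the low four lanes come from Op1 and the high four from Op0.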
  } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
                       Name == "avx2.vinserti128" ||
                       Name.startswith("avx512.mask.insert"))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    unsigned DstNumElts = CI->getType()->getVectorNumElements();
    unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
    unsigned Scale = DstNumElts / SrcNumElts;

    // Mask off the high bits of the immediate value; hardware ignores those.
    Imm = Imm % Scale;

    // Extend the second operand into a vector the size of the destination.
    Value *UndefV = UndefValue::get(Op1->getType());
    SmallVector<uint32_t, 8> Idxs(DstNumElts);
    for (unsigned i = 0; i != SrcNumElts; ++i)
      Idxs[i] = i;
    for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
      Idxs[i] = SrcNumElts;
    Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);

    // Insert the second operand into the first operand.

    // Note that there is no guarantee that instruction lowering will actually
    // produce a vinsertf128 instruction for the created shuffles. In
    // particular, the 0 immediate case involves no lane changes, so it can
    // be handled as a blend.

    // Example of shuffle mask for 32-bit elements:
    // Imm = 1  <i32 0, i32 1, i32 2,  i32 3,  i32 8, i32 9, i32 10, i32 11>
    // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6,  i32 7>

    // First fill with identity mask.
    for (unsigned i = 0; i != DstNumElts; ++i)
      Idxs[i] = i;
    // Then replace the elements where we need to insert.
    for (unsigned i = 0; i != SrcNumElts; ++i)
      Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
    Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);

    // If the intrinsic has a mask operand, handle that.
    if (CI->getNumArgOperands() == 5)
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                          CI->getArgOperand(3));
  } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
                       Name == "avx2.vextracti128" ||
                       Name.startswith("avx512.mask.vextract"))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned DstNumElts = CI->getType()->getVectorNumElements();
    unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
    unsigned Scale = SrcNumElts / DstNumElts;

    // Mask off the high bits of the immediate value; hardware ignores those.
    Imm = Imm % Scale;

    // Get indexes for the subvector of the input vector.
    SmallVector<uint32_t, 8> Idxs(DstNumElts);
    for (unsigned i = 0; i != DstNumElts; ++i) {
      Idxs[i] = i + (Imm * DstNumElts);
    }
    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    // If the intrinsic has a mask operand, handle that.
    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (!IsX86 && Name == "stackprotectorcheck") {
    Rep = nullptr;
  } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
                       Name.startswith("avx512.mask.perm.di."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();

    SmallVector<uint32_t, 8> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
                       Name == "avx2.vperm2i128")) {
    // The immediate permute control byte looks like this:
    // [1:0] - select 128 bits from sources for low half of destination
    // [2]   - ignore
    // [3]   - zero low half of destination
    // [5:4] - select 128 bits from sources for high half of destination
    // [6]   - ignore
    // [7]   - zero high half of destination

    uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();

    unsigned NumElts = CI->getType()->getVectorNumElements();
    unsigned HalfSize = NumElts / 2;
    SmallVector<uint32_t, 8> ShuffleMask(NumElts);

    // Determine which operand(s) are actually in use for this instruction.
    Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
    Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);

    // If needed, replace operands based on zero mask.
    V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
    V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;

    // Permute low half of result.
    unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i] = StartIndex + i;

    // Permute high half of result.
    StartIndex = (Imm & 0x10) ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;

    Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
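    // For illustration: Imm == 0x31 selects the high half of the first
    // source for the low result half (bits [1:0] == 1) and the high half of
    // the second source for the high result half (bits [5:4] == 3) - the
    // classic "concatenate the two upper lanes" pattern.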
  } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
                       Name == "sse2.pshuf.d" ||
                       Name.startswith("avx512.mask.vpermil.p") ||
                       Name.startswith("avx512.mask.pshuf.d."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();
    // Calculate the size of each index in the immediate.
    unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
    unsigned IdxMask = ((1 << IdxSize) - 1);

    SmallVector<uint32_t, 8> Idxs(NumElts);
    // Lookup the bits for this element, wrapping around the immediate every
    // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need
    // to offset by the first index of each group.
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name == "sse2.pshufl.w" ||
                       Name.startswith("avx512.mask.pshufl.w."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
      for (unsigned i = 4; i != 8; ++i)
        Idxs[i + l] = i + l;
    }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name == "sse2.pshufh.w" ||
                       Name.startswith("avx512.mask.pshufh.w."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l] = i + l;
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
    }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    unsigned NumLaneElts = 128 / CI->getType()->getScalarSizeInBits();
    unsigned HalfLaneElts = NumLaneElts / 2;

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      // Base index is the starting element of the lane.
      Idxs[i] = i - (i % NumLaneElts);
      // If we are half way through the lane, switch to the other source.
      if ((i % NumLaneElts) >= HalfLaneElts)
        Idxs[i] += NumElts;
      // Now select the specific element by adding HalfLaneElts bits from the
      // immediate, wrapping around the immediate every 8 bits.
      Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
    }

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                        CI->getArgOperand(3));
  } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
                       Name.startswith("avx512.mask.movshdup") ||
                       Name.startswith("avx512.mask.movsldup"))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned NumElts = CI->getType()->getVectorNumElements();
    unsigned NumLaneElts = 128 / CI->getType()->getScalarSizeInBits();

    unsigned Offset = 0;
    if (Name.startswith("avx512.mask.movshdup."))
      Offset = 1;

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += NumLaneElts)
      for (unsigned i = 0; i != NumLaneElts; i += 2) {
        Idxs[i + l + 0] = i + l + Offset;
        Idxs[i + l + 1] = i + l + Offset;
      }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
                       Name.startswith("avx512.mask.unpckl."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    int NumElts = CI->getType()->getVectorNumElements();
    int NumLaneElts = 128 / CI->getType()->getScalarSizeInBits();

    SmallVector<uint32_t, 64> Idxs(NumElts);
    for (int l = 0; l != NumElts; l += NumLaneElts)
      for (int i = 0; i != NumLaneElts; ++i)
        Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
                       Name.startswith("avx512.mask.unpckh."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    int NumElts = CI->getType()->getVectorNumElements();
    int NumLaneElts = 128 / CI->getType()->getScalarSizeInBits();

    SmallVector<uint32_t, 64> Idxs(NumElts);
    for (int l = 0; l != NumElts; l += NumLaneElts)
      for (int i = 0; i != NumLaneElts; ++i)
        Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
                       Name.startswith("avx512.mask.pand."))) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
                       Name.startswith("avx512.mask.pandn."))) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
    Rep = Builder.CreateAnd(Rep,
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
                       Name.startswith("avx512.mask.por."))) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                           Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
                       Name.startswith("avx512.mask.pxor."))) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
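    // The bitwise branches above bitcast to integer vectors first because
    // LLVM IR has no and/or/xor instructions on floating-point types.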
  } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
    Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
    Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
    Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
    if (Name.endswith(".512")) {
      Intrinsic::ID IID;
      if (Name[17] == 's')
        IID = Intrinsic::x86_avx512_add_ps_512;
      else
        IID = Intrinsic::x86_avx512_add_pd_512;

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               { CI->getArgOperand(0), CI->getArgOperand(1),
                                 CI->getArgOperand(4) });
    } else {
      Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    }
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
    if (Name.endswith(".512")) {
      Intrinsic::ID IID;
      if (Name[17] == 's')
        IID = Intrinsic::x86_avx512_div_ps_512;
      else
        IID = Intrinsic::x86_avx512_div_pd_512;

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               { CI->getArgOperand(0), CI->getArgOperand(1),
                                 CI->getArgOperand(4) });
    } else {
      Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
    }
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
    if (Name.endswith(".512")) {
      Intrinsic::ID IID;
      if (Name[17] == 's')
        IID = Intrinsic::x86_avx512_mul_ps_512;
      else
        IID = Intrinsic::x86_avx512_mul_pd_512;

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               { CI->getArgOperand(0), CI->getArgOperand(1),
                                 CI->getArgOperand(4) });
    } else {
      Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
    }
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
    if (Name.endswith(".512")) {
      Intrinsic::ID IID;
      if (Name[17] == 's')
        IID = Intrinsic::x86_avx512_sub_ps_512;
      else
        IID = Intrinsic::x86_avx512_sub_pd_512;

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               { CI->getArgOperand(0), CI->getArgOperand(1),
                                 CI->getArgOperand(4) });
    } else {
      Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
    }
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
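    // The .512 forms above stay on target-specific intrinsics because their
    // operand 4 carries the rounding mode; the narrower forms have no such
    // operand and can become plain IR fadd/fsub/fmul/fdiv.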
  } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
                       Name.startswith("avx512.mask.min.p")) &&
             Name.drop_front(18) == ".512") {
    bool IsDouble = Name[17] == 'd';
    bool IsMin = Name[13] == 'i';
    static const Intrinsic::ID MinMaxTbl[2][2] = {
      { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
      { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
    };
    Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getArgOperand(0), CI->getArgOperand(1),
                               CI->getArgOperand(4) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                       Intrinsic::ctlz,
                                                       CI->getType()),
                             { CI->getArgOperand(0), Builder.getInt1(false) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
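    // The i1 false argument tells llvm.ctlz that a zero input is well
    // defined (it returns the bit width), matching vplzcnt semantics.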
  } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
        IID = Intrinsic::x86_avx2_psllv_q;
      else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
        IID = Intrinsic::x86_avx2_psllv_q_256;
      else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
        IID = Intrinsic::x86_avx2_psllv_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
        IID = Intrinsic::x86_avx2_psllv_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
        IID = Intrinsic::x86_avx512_psllv_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
        IID = Intrinsic::x86_avx512_psllv_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
        IID = Intrinsic::x86_avx512_psllv_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
                          : Intrinsic::x86_sse2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
                          : Intrinsic::x86_sse2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
                          : Intrinsic::x86_sse2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
                          : Intrinsic::x86_avx2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
                          : Intrinsic::x86_avx2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
                          : Intrinsic::x86_avx2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_d_512 :
                            Intrinsic::x86_avx512_psll_d_512;
      else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_q_512 :
                            Intrinsic::x86_avx512_psll_q_512;
      else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
                          : Intrinsic::x86_avx512_psll_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
|
|
|
|
} else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
      bool IsImmediate = Name[16] == 'i' ||
                         (Name.size() > 18 && Name[18] == 'i');
      bool IsVariable = Name[16] == 'v';
      char Size = Name[16] == '.' ? Name[17] :
                  Name[17] == '.' ? Name[18] :
                  Name[18] == '.' ? Name[19] :
                                    Name[20];

      Intrinsic::ID IID;
      if (IsVariable && Name[17] != '.') {
        if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
          IID = Intrinsic::x86_avx2_psrlv_q;
        else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
          IID = Intrinsic::x86_avx2_psrlv_q_256;
        else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
          IID = Intrinsic::x86_avx2_psrlv_d;
        else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
          IID = Intrinsic::x86_avx2_psrlv_d_256;
        else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
          IID = Intrinsic::x86_avx512_psrlv_w_128;
        else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
          IID = Intrinsic::x86_avx512_psrlv_w_256;
        else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
          IID = Intrinsic::x86_avx512_psrlv_w_512;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".128")) {
        if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
                            : Intrinsic::x86_sse2_psrl_d;
        else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
                            : Intrinsic::x86_sse2_psrl_q;
        else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
                            : Intrinsic::x86_sse2_psrl_w;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".256")) {
        if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
                            : Intrinsic::x86_avx2_psrl_d;
        else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
                            : Intrinsic::x86_avx2_psrl_q;
        else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
                            : Intrinsic::x86_avx2_psrl_w;
        else
          llvm_unreachable("Unexpected size");
      } else {
        if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrlv_d_512 :
                              Intrinsic::x86_avx512_psrl_d_512;
        else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrlv_q_512 :
                              Intrinsic::x86_avx512_psrl_q_512;
        else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
                            : Intrinsic::x86_avx512_psrl_w_512;
        else
          llvm_unreachable("Unexpected size");
      }

      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
    } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
      bool IsImmediate = Name[16] == 'i' ||
                         (Name.size() > 18 && Name[18] == 'i');
      bool IsVariable = Name[16] == 'v';
      char Size = Name[16] == '.' ? Name[17] :
                  Name[17] == '.' ? Name[18] :
                  Name[18] == '.' ? Name[19] :
                                    Name[20];

      Intrinsic::ID IID;
      if (IsVariable && Name[17] != '.') {
        if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
          IID = Intrinsic::x86_avx2_psrav_d;
        else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
          IID = Intrinsic::x86_avx2_psrav_d_256;
        else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
          IID = Intrinsic::x86_avx512_psrav_w_128;
        else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
          IID = Intrinsic::x86_avx512_psrav_w_256;
        else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
          IID = Intrinsic::x86_avx512_psrav_w_512;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".128")) {
        if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
                            : Intrinsic::x86_sse2_psra_d;
        else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
          IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
                IsVariable  ? Intrinsic::x86_avx512_psrav_q_128 :
                              Intrinsic::x86_avx512_psra_q_128;
        else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
                            : Intrinsic::x86_sse2_psra_w;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".256")) {
        if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
                            : Intrinsic::x86_avx2_psra_d;
        else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
          IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
                IsVariable  ? Intrinsic::x86_avx512_psrav_q_256 :
                              Intrinsic::x86_avx512_psra_q_256;
        else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
                            : Intrinsic::x86_avx2_psra_w;
        else
          llvm_unreachable("Unexpected size");
      } else {
        if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
          IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrav_d_512 :
                              Intrinsic::x86_avx512_psra_d_512;
        else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
          IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrav_q_512 :
                              Intrinsic::x86_avx512_psra_q_512;
        else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
          IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
                            : Intrinsic::x86_avx512_psra_w_512;
        else
          llvm_unreachable("Unexpected size");
      }

      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
    } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
      Rep = upgradeMaskedMove(Builder, *CI);
    } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
      Rep = UpgradeMaskToInt(Builder, *CI);
    } else if (IsX86 && Name.endswith(".movntdqa")) {
      Module *M = F->getParent();
      MDNode *Node = MDNode::get(
          C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));

      Value *Ptr = CI->getArgOperand(0);
      VectorType *VTy = cast<VectorType>(CI->getType());

      // Convert the type of the pointer to a pointer to the stored type.
      Value *BC =
          Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
      LoadInst *LI =
          Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8));
      LI->setMetadata(M->getMDKindID("nontemporal"), Node);
      Rep = LI;
    } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
                         Name.startswith("fma.vfmsub.") ||
                         Name.startswith("fma.vfnmadd.") ||
                         Name.startswith("fma.vfnmsub."))) {
      bool NegMul = Name[6] == 'n';
      bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
      bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      if (IsScalar) {
        Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
        Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
        Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
      }

      if (NegMul && !IsScalar)
        Ops[0] = Builder.CreateFNeg(Ops[0]);
      if (NegMul && IsScalar)
        Ops[1] = Builder.CreateFNeg(Ops[1]);
      if (NegAcc)
        Ops[2] = Builder.CreateFNeg(Ops[2]);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
                                                         Intrinsic::fma,
                                                         Ops[0]->getType()),
                               Ops);

      if (IsScalar)
        Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
                                          (uint64_t)0);
    } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
      Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
      Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
                                                         Intrinsic::fma,
                                                         Ops[0]->getType()),
                               Ops);

      Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
                                        Rep, (uint64_t)0);
    } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
                         Name.startswith("avx512.maskz.vfmadd.s") ||
                         Name.startswith("avx512.mask3.vfmadd.s") ||
                         Name.startswith("avx512.mask3.vfmsub.s") ||
                         Name.startswith("avx512.mask3.vfnmsub.s"))) {
      bool IsMask3 = Name[11] == '3';
      bool IsMaskZ = Name[11] == 'z';
      // Drop the "avx512.mask." to make it easier.
      Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
      bool NegMul = Name[2] == 'n';
      bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';

      Value *A = CI->getArgOperand(0);
      Value *B = CI->getArgOperand(1);
      Value *C = CI->getArgOperand(2);

      if (NegMul && (IsMask3 || IsMaskZ))
        A = Builder.CreateFNeg(A);
      if (NegMul && !(IsMask3 || IsMaskZ))
        B = Builder.CreateFNeg(B);
      if (NegAcc)
        C = Builder.CreateFNeg(C);

      A = Builder.CreateExtractElement(A, (uint64_t)0);
      B = Builder.CreateExtractElement(B, (uint64_t)0);
      C = Builder.CreateExtractElement(C, (uint64_t)0);

      if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
          cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
        Value *Ops[] = { A, B, C, CI->getArgOperand(4) };

        Intrinsic::ID IID;
        if (Name.back() == 'd')
          IID = Intrinsic::x86_avx512_vfmadd_f64;
        else
          IID = Intrinsic::x86_avx512_vfmadd_f32;
        Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
        Rep = Builder.CreateCall(FMA, Ops);
      } else {
        Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
                                                  Intrinsic::fma,
                                                  A->getType());
        Rep = Builder.CreateCall(FMA, { A, B, C });
      }

      Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) :
                        IsMask3 ? C : A;

      // For Mask3 with NegAcc, we need to create a new extractelement that
      // avoids the negation above.
      if (NegAcc && IsMask3)
        PassThru = Builder.CreateExtractElement(CI->getArgOperand(2),
                                                (uint64_t)0);

      Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3),
                                Rep, PassThru);
      Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0),
                                        Rep, (uint64_t)0);
    } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") ||
                         Name.startswith("avx512.mask.vfnmadd.p") ||
                         Name.startswith("avx512.mask.vfnmsub.p") ||
                         Name.startswith("avx512.mask3.vfmadd.p") ||
                         Name.startswith("avx512.mask3.vfmsub.p") ||
                         Name.startswith("avx512.mask3.vfnmsub.p") ||
                         Name.startswith("avx512.maskz.vfmadd.p"))) {
      bool IsMask3 = Name[11] == '3';
      bool IsMaskZ = Name[11] == 'z';
      // Drop the "avx512.mask." to make it easier.
      Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
      bool NegMul = Name[2] == 'n';
      bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';

      Value *A = CI->getArgOperand(0);
      Value *B = CI->getArgOperand(1);
      Value *C = CI->getArgOperand(2);

      if (NegMul && (IsMask3 || IsMaskZ))
        A = Builder.CreateFNeg(A);
      if (NegMul && !(IsMask3 || IsMaskZ))
        B = Builder.CreateFNeg(B);
      if (NegAcc)
        C = Builder.CreateFNeg(C);

      if (CI->getNumArgOperands() == 5 &&
          (!isa<ConstantInt>(CI->getArgOperand(4)) ||
           cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
        Intrinsic::ID IID;
        // Check the character before ".512" in string.
        if (Name[Name.size()-5] == 's')
          IID = Intrinsic::x86_avx512_vfmadd_ps_512;
        else
          IID = Intrinsic::x86_avx512_vfmadd_pd_512;

        Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                 { A, B, C, CI->getArgOperand(4) });
      } else {
        Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
                                                  Intrinsic::fma,
                                                  A->getType());
        Rep = Builder.CreateCall(FMA, { A, B, C });
      }

      Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                        IsMask3 ? CI->getArgOperand(2) :
                                  CI->getArgOperand(0);

      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") ||
                         Name.startswith("fma.vfmsubadd.p"))) {
      bool IsSubAdd = Name[7] == 's';
      int NumElts = CI->getType()->getVectorNumElements();

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                Ops[0]->getType());
      Value *Odd = Builder.CreateCall(FMA, Ops);
      Ops[2] = Builder.CreateFNeg(Ops[2]);
      Value *Even = Builder.CreateCall(FMA, Ops);

      if (IsSubAdd)
        std::swap(Even, Odd);

      SmallVector<uint32_t, 32> Idxs(NumElts);
      for (int i = 0; i != NumElts; ++i)
        Idxs[i] = i + (i % 2) * NumElts;

      Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
    } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
                         Name.startswith("avx512.mask3.vfmaddsub.p") ||
                         Name.startswith("avx512.maskz.vfmaddsub.p") ||
                         Name.startswith("avx512.mask3.vfmsubadd.p"))) {
      bool IsMask3 = Name[11] == '3';
      bool IsMaskZ = Name[11] == 'z';
      // Drop the "avx512.mask." to make it easier.
      Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
      bool IsSubAdd = Name[3] == 's';
      if (CI->getNumArgOperands() == 5 &&
          (!isa<ConstantInt>(CI->getArgOperand(4)) ||
           cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
        Intrinsic::ID IID;
        // Check the character before ".512" in string.
        if (Name[Name.size()-5] == 's')
          IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
        else
          IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;

        Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2), CI->getArgOperand(4) };
        if (IsSubAdd)
          Ops[2] = Builder.CreateFNeg(Ops[2]);

        Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                 Ops);
      } else {
        int NumElts = CI->getType()->getVectorNumElements();

        Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2) };

        Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                  Ops[0]->getType());
        Value *Odd = Builder.CreateCall(FMA, Ops);
        Ops[2] = Builder.CreateFNeg(Ops[2]);
        Value *Even = Builder.CreateCall(FMA, Ops);

        if (IsSubAdd)
          std::swap(Even, Odd);

        SmallVector<uint32_t, 32> Idxs(NumElts);
        for (int i = 0; i != NumElts; ++i)
          Idxs[i] = i + (i % 2) * NumElts;

        Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
      }

      Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                        IsMask3 ? CI->getArgOperand(2) :
                                  CI->getArgOperand(0);

      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
                         Name.startswith("avx512.maskz.pternlog."))) {
      bool ZeroMask = Name[11] == 'z';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      unsigned EltWidth = CI->getType()->getScalarSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_128;
      else if (VecWidth == 256 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_256;
      else if (VecWidth == 512 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_512;
      else if (VecWidth == 128 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_128;
      else if (VecWidth == 256 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_256;
      else if (VecWidth == 512 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2), CI->getArgOperand(3) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
                         Name.startswith("avx512.maskz.vpmadd52"))) {
      bool ZeroMask = Name[11] == 'z';
      bool High = Name[20] == 'h' || Name[21] == 'h';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
      else if (VecWidth == 256 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
      else if (VecWidth == 512 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
      else if (VecWidth == 128 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
      else if (VecWidth == 256 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
      else if (VecWidth == 512 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
                         Name.startswith("avx512.mask.vpermt2var.") ||
                         Name.startswith("avx512.maskz.vpermt2var."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IndexForm = Name[17] == 'i';
      Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
                         Name.startswith("avx512.maskz.vpdpbusd.") ||
                         Name.startswith("avx512.mask.vpdpbusds.") ||
                         Name.startswith("avx512.maskz.vpdpbusds."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_128;
      else if (VecWidth == 256 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_256;
      else if (VecWidth == 512 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_512;
      else if (VecWidth == 128 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_128;
      else if (VecWidth == 256 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_256;
      else if (VecWidth == 512 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
                         Name.startswith("avx512.maskz.vpdpwssd.") ||
                         Name.startswith("avx512.mask.vpdpwssds.") ||
                         Name.startswith("avx512.maskz.vpdpwssds."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_128;
      else if (VecWidth == 256 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_256;
      else if (VecWidth == 512 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_512;
      else if (VecWidth == 128 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_128;
      else if (VecWidth == 256 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_256;
      else if (VecWidth == 512 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
                         Name == "addcarry.u32" || Name == "addcarry.u64" ||
                         Name == "subborrow.u32" || Name == "subborrow.u64")) {
      Intrinsic::ID IID;
      if (Name[0] == 'a' && Name.back() == '2')
        IID = Intrinsic::x86_addcarry_32;
      else if (Name[0] == 'a' && Name.back() == '4')
        IID = Intrinsic::x86_addcarry_64;
      else if (Name[0] == 's' && Name.back() == '2')
        IID = Intrinsic::x86_subborrow_32;
      else if (Name[0] == 's' && Name.back() == '4')
        IID = Intrinsic::x86_subborrow_64;
      else
        llvm_unreachable("Unexpected intrinsic");

      // Make a call with 3 operands.
      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2)};
      Value *NewCall = Builder.CreateCall(
          Intrinsic::getDeclaration(CI->getModule(), IID), Args);

      // Extract the second result and store it.
      Value *Data = Builder.CreateExtractValue(NewCall, 1);
      // Cast the pointer to the right type.
      Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
                                 llvm::PointerType::getUnqual(Data->getType()));
      Builder.CreateAlignedStore(Data, Ptr, Align(1));
      // Replace the original call result with the first result of the new call.
      Value *CF = Builder.CreateExtractValue(NewCall, 0);

      CI->replaceAllUsesWith(CF);
      Rep = nullptr;
    } else if (IsX86 && Name.startswith("avx512.mask.") &&
               upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
      // Rep will be updated by the call in the condition.
    } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
      Value *Arg = CI->getArgOperand(0);
      Value *Neg = Builder.CreateNeg(Arg, "neg");
      Value *Cmp = Builder.CreateICmpSGE(
          Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
      Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
    } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") ||
                          Name.startswith("atomic.load.add.f64.p"))) {
      Value *Ptr = CI->getArgOperand(0);
      Value *Val = CI->getArgOperand(1);
      Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val,
                                    AtomicOrdering::SequentiallyConsistent);
} else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
|
|
|
|
Name == "max.ui" || Name == "max.ull")) {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
|
|
|
|
? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
|
|
|
|
: Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
|
|
|
|
Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
|
|
|
|
} else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
|
|
|
|
Name == "min.ui" || Name == "min.ull")) {
|
|
|
|
Value *Arg0 = CI->getArgOperand(0);
|
|
|
|
Value *Arg1 = CI->getArgOperand(1);
|
|
|
|
Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
|
|
|
|
? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
|
|
|
|
: Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
|
|
|
|
Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
    } else if (IsNVVM && Name == "clz.ll") {
      // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
      Value *Arg = CI->getArgOperand(0);
      Value *Ctlz = Builder.CreateCall(
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                    {Arg->getType()}),
          {Arg, Builder.getFalse()}, "ctlz");
      Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
    } else if (IsNVVM && Name == "popc.ll") {
      // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
      Value *Arg = CI->getArgOperand(0);
      Value *Popc = Builder.CreateCall(
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                    {Arg->getType()}),
          Arg, "ctpop");
      Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
    } else if (IsNVVM && Name == "h2f") {
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(
                                   F->getParent(), Intrinsic::convert_from_fp16,
                                   {Builder.getFloatTy()}),
                               CI->getArgOperand(0), "h2f");
    } else {
      llvm_unreachable("Unknown function for CallInst upgrade.");
    }

    if (Rep)
      CI->replaceAllUsesWith(Rep);
    CI->eraseFromParent();
    return;
  }
const auto &DefaultCase = [&NewFn, &CI]() -> void {
|
2017-02-16 00:16:20 +01:00
|
|
|
// Handle generic mangling change, but nothing else
|
|
|
|
assert(
|
|
|
|
(CI->getCalledFunction()->getName() != NewFn->getName()) &&
|
|
|
|
"Unknown function for CallInst upgrade and isn't just a name change");
|
2017-03-01 02:49:13 +01:00
|
|
|
CI->setCalledFunction(NewFn);
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
};

  CallInst *NewCall = nullptr;
  switch (NewFn->getIntrinsicID()) {
  default: {
    DefaultCase();
    return;
  }
  case Intrinsic::experimental_vector_reduce_v2_fmul: {
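    // The v2 form takes an explicit start value. For fast-math calls the
    // start value is irrelevant, so substitute the identity (1.0 here, zero
    // for fadd below); otherwise reuse the original accumulator operand.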
    SmallVector<Value *, 2> Args;
    if (CI->isFast())
      Args.push_back(ConstantFP::get(CI->getOperand(0)->getType(), 1.0));
    else
      Args.push_back(CI->getOperand(0));
    Args.push_back(CI->getOperand(1));
    NewCall = Builder.CreateCall(NewFn, Args);
    cast<Instruction>(NewCall)->copyFastMathFlags(CI);
    break;
  }
  case Intrinsic::experimental_vector_reduce_v2_fadd: {
    SmallVector<Value *, 2> Args;
    if (CI->isFast())
      Args.push_back(Constant::getNullValue(CI->getOperand(0)->getType()));
    else
      Args.push_back(CI->getOperand(0));
    Args.push_back(CI->getOperand(1));
    NewCall = Builder.CreateCall(NewFn, Args);
    cast<Instruction>(NewCall)->copyFastMathFlags(CI);
    break;
  }
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::bitreverse:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    assert(CI->getNumArgOperands() == 1 &&
           "Mismatch between function args and call args");
    NewCall =
        Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
    break;

  case Intrinsic::objectsize: {
    Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
                                   ? Builder.getFalse()
                                   : CI->getArgOperand(2);
    Value *Dynamic =
        CI->getNumArgOperands() < 4 ? Builder.getFalse() : CI->getArgOperand(3);
    NewCall = Builder.CreateCall(
        NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize,
                Dynamic});
    break;
  }

  case Intrinsic::ctpop:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::convert_from_fp16:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::dbg_value:
    // Upgrade from the old version that had an extra offset argument.
    assert(CI->getNumArgOperands() == 4);
    // Drop nonzero offsets instead of attempting to upgrade them.
    if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
      if (Offset->isZeroValue()) {
        NewCall = Builder.CreateCall(
            NewFn,
            {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
        break;
      }
    CI->eraseFromParent();
    return;

  case Intrinsic::x86_xop_vfrcz_ss:
  case Intrinsic::x86_xop_vfrcz_sd:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
    break;

  case Intrinsic::x86_xop_vpermil2pd:
  case Intrinsic::x86_xop_vpermil2ps:
  case Intrinsic::x86_xop_vpermil2pd_256:
  case Intrinsic::x86_xop_vpermil2ps_256: {
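    // The selector operand changed from a float vector to an integer vector
    // of the same width; bitcast it before calling the new declaration.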
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
    VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
    Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestnzc: {
    // The arguments for these intrinsics used to be v4f32, and changed
    // to v2i64. This is purely a nop, since those are bitwise intrinsics.
    // So, the only thing required is a bitcast for both arguments.
    // First, check the arguments have the old type.
    Value *Arg0 = CI->getArgOperand(0);
    if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
      return;

    // Old intrinsic, add bitcasts
    Value *Arg1 = CI->getArgOperand(1);

    Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);

    Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
    Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");

    NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
    break;
}

  case Intrinsic::x86_rdtscp: {
    // This used to take 1 argument. If we have no arguments, it is already
    // upgraded.
    if (CI->getNumOperands() == 0)
      return;

    NewCall = Builder.CreateCall(NewFn);
    // Extract the second result and store it.
    Value *Data = Builder.CreateExtractValue(NewCall, 1);
    // Cast the pointer to the right type.
    Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
                                 llvm::PointerType::getUnqual(Data->getType()));
    Builder.CreateAlignedStore(Data, Ptr, Align(1));
    // Replace the original call result with the first result of the new call.
    Value *TSC = Builder.CreateExtractValue(NewCall, 0);

    std::string Name = CI->getName();
    if (!Name.empty()) {
      CI->setName(Name + ".old");
      NewCall->setName(Name);
    }
    CI->replaceAllUsesWith(TSC);
    CI->eraseFromParent();
    return;
  }
|
|
|
|
|
[x86] Fix a pretty horrible bug and inconsistency in the x86 asm
parsing (and latent bug in the instruction definitions).
This is effectively a revert of r136287 which tried to address
a specific and narrow case of immediate operands failing to be accepted
by x86 instructions with a pretty heavy hammer: it introduced a new kind
of operand that behaved differently. All of that is removed with this
commit, but the test cases are both preserved and enhanced.
The core problem that r136287 and this commit are trying to handle is
that gas accepts both of the following instructions:
insertps $192, %xmm0, %xmm1
insertps $-64, %xmm0, %xmm1
These will encode to the same byte sequence, with the immediate
occupying an 8-bit entry. The first form was fixed by r136287 but that
broke the prior handling of the second form! =[ Ironically, we would
still emit the second form in some cases and then be unable to
re-assemble the output.
The reason why the first instruction failed to be handled is because
prior to r136287 the operands were marked 'i32i8imm', which forces them to
be sign-extendable. Clearly, that won't work for 192 in a single byte.
However, making them zero-extended or "unsigned" doesn't really address
the core issue either because it breaks negative immediates. The correct
fix is to make these operands 'i8imm' reflecting that they can be either
signed or unsigned but must be 8-bit immediates. This patch backs out
r136287 and then changes those places as well as some others to use
'i8imm' rather than one of the extended variants.
Naturally, this broke something else. The custom DAG nodes had to be
updated to have a much more accurate type constraint of an i8 node, and
a bunch of Pat immediates needed to be specified as i8 values.
The fallout didn't end there though. We also then ceased to be able to
match the instruction-specific intrinsics to the instructions so
modified. Digging, this is because they too used i32 rather than i8 in
their signature. So I've also switched those intrinsics to i8 arguments
in line with the instructions.
In order to make the intrinsic adjustments of course, I also had to add
auto upgrading for the intrinsics.
I suspect that the intrinsic argument types may have led everything down
this rabbit hole. Pretty happy with the result.
llvm-svn: 217310
2014-09-06 12:00:01 +02:00
|
|
|
case Intrinsic::x86_sse41_insertps:
|
|
|
|
case Intrinsic::x86_sse41_dppd:
|
|
|
|
case Intrinsic::x86_sse41_dpps:
|
|
|
|
case Intrinsic::x86_sse41_mpsadbw:
|
|
|
|
case Intrinsic::x86_avx_dp_ps_256:
|
|
|
|
case Intrinsic::x86_avx2_mpsadbw: {
|
|
|
|
// Need to truncate the last argument from i32 to i8 -- this argument models
|
|
|
|
// an inherently 8-bit immediate operand to these x86 instructions.
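// An illustrative sketch (operand values assumed):
//   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i32 192)
// is rewritten so the immediate is truncated first:
//   %t = trunc i32 192 to i8
//   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i8 %t)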
|
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
|
|
|
|
|
|
|
// Replace the last argument with a trunc.
|
|
|
|
Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
|
2017-03-01 02:49:13 +01:00
|
|
|
NewCall = Builder.CreateCall(NewFn, Args);
|
|
|
|
break;
|
2007-12-17 23:33:23 +01:00
|
|
|
}
|
2016-04-19 22:51:05 +02:00
|
|
|
|
|
|
|
case Intrinsic::thread_pointer: {
|
2017-03-01 02:49:13 +01:00
|
|
|
NewCall = Builder.CreateCall(NewFn, {});
|
|
|
|
break;
|
2016-04-19 22:51:05 +02:00
|
|
|
}
|
2016-06-28 20:27:25 +02:00
|
|
|
|
2016-08-14 01:31:24 +02:00
|
|
|
case Intrinsic::invariant_start:
|
|
|
|
case Intrinsic::invariant_end:
|
2016-06-28 20:27:25 +02:00
|
|
|
case Intrinsic::masked_load:
|
2017-05-03 14:28:54 +02:00
|
|
|
case Intrinsic::masked_store:
|
|
|
|
case Intrinsic::masked_gather:
|
|
|
|
case Intrinsic::masked_scatter: {
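// No operand rewriting is needed for these; the arguments are forwarded
// unchanged and only the callee is refreshed to the new declaration
// (e.g., assuming the usual overload mangling, @llvm.masked.load.v4f32
// becoming @llvm.masked.load.v4f32.p0v4f32).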
|
2016-06-28 20:27:25 +02:00
|
|
|
SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
|
|
|
|
CI->arg_operands().end());
|
2017-03-01 02:49:13 +01:00
|
|
|
NewCall = Builder.CreateCall(NewFn, Args);
|
|
|
|
break;
|
|
|
|
}
|
Remove alignment argument from memcpy/memmove/memset in favour of alignment attributes (Step 1)
Summary:
This is a resurrection of work first proposed and discussed in Aug 2015:
http://lists.llvm.org/pipermail/llvm-dev/2015-August/089384.html
and initially landed (but then backed out) in Nov 2015:
http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20151109/312083.html
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change is the first in a series that allows source and dest to each
have their own alignments by using the alignment attribute on their arguments.
In this change we:
1) Remove the alignment argument.
2) Add alignment attributes to the source & dest arguments. We, temporarily,
require that the alignments for source & dest be equal.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
Downstream users may have to update their lit tests that check for
@llvm.memcpy/memmove/memset call/declaration patterns. The following extended sed script
may help with updating the majority of your tests, but it does not catch all possible
patterns so some manual checking and updating will be required.
s~declare void @llvm\.mem(set|cpy|move)\.p([^(]*)\((.*), i32, i1\)~declare void @llvm.mem\1.p\2(\3, i1)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* \3, i8 \4, i8 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* \3, i8 \4, i16 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* \3, i8 \4, i32 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* \3, i8 \4, i64 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* \3, i8 \4, i128 \5, i1 \6)~g
s~call void @llvm\.memset\.p([^(]*)i8\(i8([^*]*)\* (.*), i8 (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i8(i8\2* align \6 \3, i8 \4, i8 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i16\(i8([^*]*)\* (.*), i8 (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i16(i8\2* align \6 \3, i8 \4, i16 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i32\(i8([^*]*)\* (.*), i8 (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i32(i8\2* align \6 \3, i8 \4, i32 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i64\(i8([^*]*)\* (.*), i8 (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i64(i8\2* align \6 \3, i8 \4, i64 \5, i1 \7)~g
s~call void @llvm\.memset\.p([^(]*)i128\(i8([^*]*)\* (.*), i8 (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.memset.p\1i128(i8\2* align \6 \3, i8 \4, i128 \5, i1 \7)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* \4, i8\5* \6, i8 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* \4, i8\5* \6, i16 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* \4, i8\5* \6, i32 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* \4, i8\5* \6, i64 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 [01], i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* \4, i8\5* \6, i128 \7, i1 \8)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i8\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i8 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i8(i8\3* align \8 \4, i8\5* align \8 \6, i8 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i16\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i16 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i16(i8\3* align \8 \4, i8\5* align \8 \6, i16 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i32\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i32 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i32(i8\3* align \8 \4, i8\5* align \8 \6, i32 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i64\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i64 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i64(i8\3* align \8 \4, i8\5* align \8 \6, i64 \7, i1 \9)~g
s~call void @llvm\.mem(cpy|move)\.p([^(]*)i128\(i8([^*]*)\* (.*), i8([^*]*)\* (.*), i128 (.*), i32 ([0-9]*), i1 ([^)]*)\)~call void @llvm.mem\1.p\2i128(i8\3* align \8 \4, i8\5* align \8 \6, i128 \7, i1 \9)~g
The remaining changes in the series will:
Step 2) Expand the IRBuilder API to allow creation of memcpy/memmove with differing
source and dest alignments.
Step 3) Update Clang to use the new IRBuilder API.
Step 4) Update Polly to use the new IRBuilder API.
Step 5) Update LLVM passes that create memcpy/memmove calls to use the new IRBuilder API,
and those that use MemIntrinsicInst::[get|set]Alignment() to use
getDestAlignment() and getSourceAlignment() instead.
Step 6) Remove the single-alignment IRBuilder API for memcpy/memmove, and the
MemIntrinsicInst::[get|set]Alignment() methods.
Reviewers: pete, hfinkel, lhames, reames, bollu
Reviewed By: reames
Subscribers: niosHD, reames, jholewinski, qcolombet, jfb, sanjoy, arsenm, dschuff, dylanmckay, mehdi_amini, sdardis, nemanjai, david2050, nhaehnle, javed.absar, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, asb, rbar, johnrusso, simoncook, jordy.potman.lists, apazos, sabuasal, llvm-commits
Differential Revision: https://reviews.llvm.org/D41675
llvm-svn: 322965
2018-01-19 18:13:12 +01:00
|
|
|
|
|
|
|
case Intrinsic::memcpy:
|
|
|
|
case Intrinsic::memmove:
|
|
|
|
case Intrinsic::memset: {
|
|
|
|
// We have to make sure that the call signature is what we're expecting.
|
|
|
|
// We only want to change the old signatures by removing the alignment arg:
|
|
|
|
// @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
|
|
|
|
// -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
|
|
|
|
// @llvm.memset...(i8*, i8, i[32|64], i32, i1)
|
|
|
|
// -> @llvm.memset...(i8*, i8, i[32|64], i1)
|
|
|
|
// Note: the i8*'s above can be any pointer type.
|
|
|
|
if (CI->getNumArgOperands() != 5) {
|
|
|
|
DefaultCase();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Remove alignment argument (3), and add alignment attributes to the
|
|
|
|
// dest/src pointers.
|
|
|
|
Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
|
|
|
|
CI->getArgOperand(2), CI->getArgOperand(4)};
|
|
|
|
NewCall = Builder.CreateCall(NewFn, Args);
|
|
|
|
auto *MemCI = cast<MemIntrinsic>(NewCall);
|
|
|
|
// All mem intrinsics support dest alignment.
|
|
|
|
const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
|
|
|
|
MemCI->setDestAlignment(Align->getZExtValue());
|
|
|
|
// Memcpy/Memmove also support source alignment.
|
|
|
|
if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
|
|
|
|
MTI->setSourceAlignment(Align->getZExtValue());
|
|
|
|
break;
|
|
|
|
}
|
2016-06-28 20:27:25 +02:00
|
|
|
}
|
2017-03-01 02:49:13 +01:00
|
|
|
assert(NewCall && "Should have either set this variable or returned through "
|
|
|
|
"the default case");
|
|
|
|
std::string Name = CI->getName();
|
|
|
|
if (!Name.empty()) {
|
|
|
|
CI->setName(Name + ".old");
|
|
|
|
NewCall->setName(Name);
|
2012-06-13 09:18:53 +02:00
|
|
|
}
|
2017-03-01 02:49:13 +01:00
|
|
|
CI->replaceAllUsesWith(NewCall);
|
|
|
|
CI->eraseFromParent();
|
2007-08-04 03:51:18 +02:00
|
|
|
}
|
|
|
|
|
2016-04-18 21:11:57 +02:00
|
|
|
void llvm::UpgradeCallsToIntrinsic(Function *F) {
|
2007-08-04 03:51:18 +02:00
|
|
|
assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
|
|
|
|
|
2016-04-18 21:11:57 +02:00
|
|
|
// Check if this function should be upgraded and get the replacement function
|
|
|
|
// if there is one.
|
2011-06-18 08:05:24 +02:00
|
|
|
Function *NewFn;
|
2007-12-17 23:33:23 +01:00
|
|
|
if (UpgradeIntrinsicFunction(F, NewFn)) {
|
2016-04-18 21:11:57 +02:00
|
|
|
// Replace all users of the old function with the new function or new
|
|
|
|
// instructions. This is not a range loop because the call is deleted.
|
|
|
|
for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
|
2016-04-17 05:59:37 +02:00
|
|
|
if (CallInst *CI = dyn_cast<CallInst>(*UI++))
|
2015-07-03 22:12:01 +02:00
|
|
|
UpgradeIntrinsicCall(CI, NewFn);
|
2016-04-18 21:11:57 +02:00
|
|
|
|
2015-07-03 22:12:01 +02:00
|
|
|
// Remove old function, no longer used, from the module.
|
|
|
|
F->eraseFromParent();
|
2007-08-04 03:51:18 +02:00
|
|
|
}
|
|
|
|
}
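// An illustrative caller (a sketch, not the actual bitcode-reader code):
// upgrade every function in a module, using an early-increment range
// because upgraded declarations are erased while iterating:
//   for (Function &F : llvm::make_early_inc_range(M.functions()))
//     UpgradeCallsToIntrinsic(&F);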
|
2009-08-29 01:24:31 +02:00
|
|
|
|
2016-09-15 00:29:59 +02:00
|
|
|
MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
|
2013-09-28 02:22:27 +02:00
|
|
|
// Check if the tag uses struct-path aware TBAA format.
|
2016-09-15 00:29:59 +02:00
|
|
|
if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
|
|
|
|
return &MD;
|
2013-09-28 02:22:27 +02:00
|
|
|
|
2016-09-15 00:29:59 +02:00
|
|
|
auto &Context = MD.getContext();
|
|
|
|
if (MD.getNumOperands() == 3) {
|
|
|
|
Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
|
|
|
|
MDNode *ScalarType = MDNode::get(Context, Elts);
|
2013-09-28 02:22:27 +02:00
|
|
|
// Create an MDNode <ScalarType, ScalarType, offset 0, const>
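// An illustrative sketch (node numbering assumed): an old scalar tag
//   !1 = !{!"int", !0, i64 1}
// becomes
//   !{!2, !2, i64 0, i64 1}  where  !2 = !{!"int", !0}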
|
IR: Split Metadata from Value
Split `Metadata` away from the `Value` class hierarchy, as part of
PR21532. Assembly and bitcode changes are in the wings, but this is the
bulk of the change for the IR C++ API.
I have a follow-up patch prepared for `clang`. If this breaks other
sub-projects, I apologize in advance :(. Help me compile it on Darwin
and I'll try to fix it. FWIW, the errors should be easy to fix, so it may
be simpler to just fix it yourself.
This breaks the build for all metadata-related code that's out-of-tree.
Rest assured the transition is mechanical and the compiler should catch
almost all of the problems.
Here's a quick guide for updating your code:
- `Metadata` is the root of a class hierarchy with three main classes:
`MDNode`, `MDString`, and `ValueAsMetadata`. It is distinct from
the `Value` class hierarchy. It is typeless -- i.e., instances do
*not* have a `Type`.
- `MDNode`'s operands are all `Metadata *` (instead of `Value *`).
- `TrackingVH<MDNode>` and `WeakVH` referring to metadata can be
replaced with `TrackingMDNodeRef` and `TrackingMDRef`, respectively.
If you're referring solely to resolved `MDNode`s -- post graph
construction -- just use `MDNode*`.
- `MDNode` (and the rest of `Metadata`) have only limited support for
`replaceAllUsesWith()`.
As long as an `MDNode` is pointing at a forward declaration -- the
result of `MDNode::getTemporary()` -- it maintains a side map of its
uses and can RAUW itself. Once the forward declarations are fully
resolved RAUW support is dropped on the ground. This means that
uniquing collisions on changing operands cause nodes to become
"distinct". (This already happened fairly commonly, whenever an
operand went to null.)
If you're constructing complex (non self-reference) `MDNode` cycles,
you need to call `MDNode::resolveCycles()` on each node (or on a
top-level node that somehow references all of the nodes). Also,
don't do that. Metadata cycles (and the RAUW machinery needed to
construct them) are expensive.
- An `MDNode` can only refer to a `Constant` through a bridge called
`ConstantAsMetadata` (one of the subclasses of `ValueAsMetadata`).
As a side effect, accessing an operand of an `MDNode` that is known
to be, e.g., `ConstantInt`, takes three steps: first, cast from
`Metadata` to `ConstantAsMetadata`; second, extract the `Constant`;
third, cast down to `ConstantInt`.
The eventual goal is to introduce `MDInt`/`MDFloat`/etc. and have
metadata schema owners transition away from using `Constant`s when
the type isn't important (and they don't care about referring to
`GlobalValue`s).
In the meantime, I've added transitional API to the `mdconst`
namespace that matches semantics with the old code, in order to
avoid adding the error-prone three-step equivalent to every call
site. If your old code was:
MDNode *N = foo();
bar(isa <ConstantInt>(N->getOperand(0)));
baz(cast <ConstantInt>(N->getOperand(1)));
bak(cast_or_null <ConstantInt>(N->getOperand(2)));
bat(dyn_cast <ConstantInt>(N->getOperand(3)));
bay(dyn_cast_or_null<ConstantInt>(N->getOperand(4)));
you can trivially match its semantics with:
MDNode *N = foo();
bar(mdconst::hasa <ConstantInt>(N->getOperand(0)));
baz(mdconst::extract <ConstantInt>(N->getOperand(1)));
bak(mdconst::extract_or_null <ConstantInt>(N->getOperand(2)));
bat(mdconst::dyn_extract <ConstantInt>(N->getOperand(3)));
bay(mdconst::dyn_extract_or_null<ConstantInt>(N->getOperand(4)));
and when you transition your metadata schema to `MDInt`:
MDNode *N = foo();
bar(isa <MDInt>(N->getOperand(0)));
baz(cast <MDInt>(N->getOperand(1)));
bak(cast_or_null <MDInt>(N->getOperand(2)));
bat(dyn_cast <MDInt>(N->getOperand(3)));
bay(dyn_cast_or_null<MDInt>(N->getOperand(4)));
- A `CallInst` -- specifically, intrinsic instructions -- can refer to
metadata through a bridge called `MetadataAsValue`. This is a
subclass of `Value` where `getType()->isMetadataTy()`.
`MetadataAsValue` is the *only* class that can legally refer to a
`LocalAsMetadata`, which is a bridged form of non-`Constant` values
like `Argument` and `Instruction`. It can also refer to any other
`Metadata` subclass.
(I'll break all your testcases in a follow-up commit, when I propagate
this change to assembly.)
llvm-svn: 223802
2014-12-09 19:38:53 +01:00
|
|
|
Metadata *Elts2[] = {ScalarType, ScalarType,
|
2016-09-15 00:29:59 +02:00
|
|
|
ConstantAsMetadata::get(
|
|
|
|
Constant::getNullValue(Type::getInt64Ty(Context))),
|
|
|
|
MD.getOperand(2)};
|
|
|
|
return MDNode::get(Context, Elts2);
|
2013-09-28 02:22:27 +02:00
|
|
|
}
|
2016-09-15 00:29:59 +02:00
|
|
|
// Create an MDNode <MD, MD, offset 0>
|
|
|
|
Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
|
|
|
|
Type::getInt64Ty(Context)))};
|
|
|
|
return MDNode::get(Context, Elts);
|
2013-09-28 02:22:27 +02:00
|
|
|
}
|
2013-11-15 02:34:59 +01:00
|
|
|
|
|
|
|
Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
|
|
|
|
Instruction *&Temp) {
|
|
|
|
if (Opc != Instruction::BitCast)
|
2014-04-09 08:08:46 +02:00
|
|
|
return nullptr;
|
2013-11-15 02:34:59 +01:00
|
|
|
|
2014-04-09 08:08:46 +02:00
|
|
|
Temp = nullptr;
|
2013-11-15 02:34:59 +01:00
|
|
|
Type *SrcTy = V->getType();
|
|
|
|
if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
|
|
|
|
SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
|
|
|
|
LLVMContext &Context = V->getContext();
|
|
|
|
|
|
|
|
// We have no information about target data layout, so we assume that
|
|
|
|
// the maximum pointer size is 64 bits.
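// An illustrative sketch (value names assumed): an old-bitcode
//   bitcast i8 addrspace(1)* %p to i8*
// is rewritten as the pair
//   %tmp = ptrtoint i8 addrspace(1)* %p to i64
//   %res = inttoptr i64 %tmp to i8*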
|
|
|
|
Type *MidTy = Type::getInt64Ty(Context);
|
|
|
|
Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
|
|
|
|
|
|
|
|
return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
|
|
|
|
}
|
|
|
|
|
2014-04-09 08:08:46 +02:00
|
|
|
return nullptr;
|
2013-11-15 02:34:59 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
|
|
|
|
if (Opc != Instruction::BitCast)
|
2014-04-09 08:08:46 +02:00
|
|
|
return nullptr;
|
2013-11-15 02:34:59 +01:00
|
|
|
|
|
|
|
Type *SrcTy = C->getType();
|
|
|
|
if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
|
|
|
|
SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
|
|
|
|
LLVMContext &Context = C->getContext();
|
|
|
|
|
|
|
|
// We have no information about target data layout, so we assume that
|
|
|
|
// the maximum pointer size is 64 bits.
|
|
|
|
Type *MidTy = Type::getInt64Ty(Context);
|
|
|
|
|
|
|
|
return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
|
|
|
|
DestTy);
|
|
|
|
}
|
|
|
|
|
2014-04-09 08:08:46 +02:00
|
|
|
return nullptr;
|
2013-11-15 02:34:59 +01:00
|
|
|
}
|
2013-12-02 22:29:56 +01:00
|
|
|
|
|
|
|
/// Check the debug info version number; if it is outdated, drop the debug
|
|
|
|
/// info. Return true if the module is modified.
|
|
|
|
bool llvm::UpgradeDebugInfo(Module &M) {
|
2014-01-16 02:51:12 +01:00
|
|
|
unsigned Version = getDebugMetadataVersionFromModule(M);
|
2017-10-02 20:31:29 +02:00
|
|
|
if (Version == DEBUG_METADATA_VERSION) {
|
|
|
|
bool BrokenDebugInfo = false;
|
|
|
|
if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
|
|
|
|
report_fatal_error("Broken module found, compilation aborted!");
|
|
|
|
if (!BrokenDebugInfo)
|
|
|
|
// Everything is ok.
|
|
|
|
return false;
|
|
|
|
else {
|
|
|
|
// Diagnose malformed debug info.
|
|
|
|
DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
|
|
|
|
M.getContext().diagnose(Diag);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
bool Modified = StripDebugInfo(M);
|
|
|
|
if (Modified && Version != DEBUG_METADATA_VERSION) {
|
|
|
|
// Diagnose a version mismatch.
|
2014-01-16 02:51:12 +01:00
|
|
|
DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
|
|
|
|
M.getContext().diagnose(DiagVersion);
|
|
|
|
}
|
2017-10-02 20:31:29 +02:00
|
|
|
return Modified;
|
2013-12-02 22:29:56 +01:00
|
|
|
}
|
2014-06-25 17:41:00 +02:00
|
|
|
|
2019-08-13 19:52:21 +02:00
|
|
|
/// This checks for the objc retain/release marker which should be upgraded. It
|
|
|
|
/// returns true if the module is modified.
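/// For example (separator rewrite only; the marker text is an assumed
/// placeholder): a named-metadata string "mov fp, fp#marker" becomes the
/// module-flag string "mov fp, fp;marker".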
|
|
|
|
static bool UpgradeRetainReleaseMarker(Module &M) {
|
2018-04-05 04:44:46 +02:00
|
|
|
bool Changed = false;
|
2019-04-10 08:20:20 +02:00
|
|
|
const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
|
|
|
|
NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
|
2018-04-05 04:44:46 +02:00
|
|
|
if (ModRetainReleaseMarker) {
|
|
|
|
MDNode *Op = ModRetainReleaseMarker->getOperand(0);
|
|
|
|
if (Op) {
|
|
|
|
MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
|
|
|
|
if (ID) {
|
|
|
|
SmallVector<StringRef, 4> ValueComp;
|
|
|
|
ID->getString().split(ValueComp, "#");
|
|
|
|
if (ValueComp.size() == 2) {
|
|
|
|
std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
|
2019-04-10 08:20:20 +02:00
|
|
|
ID = MDString::get(M.getContext(), NewValue);
|
2018-04-05 04:44:46 +02:00
|
|
|
}
|
2019-04-10 08:20:20 +02:00
|
|
|
M.addModuleFlag(Module::Error, MarkerKey, ID);
|
|
|
|
M.eraseNamedMetadata(ModRetainReleaseMarker);
|
|
|
|
Changed = true;
|
2018-04-05 04:44:46 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2019-08-13 19:52:21 +02:00
|
|
|
void llvm::UpgradeARCRuntime(Module &M) {
|
2019-08-13 03:23:06 +02:00
|
|
|
// This lambda converts normal calls to ARC runtime functions into
|
|
|
|
// intrinsic calls.
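// For example (a sketch; value names assumed), a call such as
//   %x = call i8* @objc_retain(i8* %p)
// is rewritten to
//   %x = call i8* @llvm.objc.retain(i8* %p)
// with bitcasts inserted around it if the declared types differ.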
|
2019-08-08 18:59:31 +02:00
|
|
|
auto UpgradeToIntrinsic = [&](const char *OldFunc,
|
|
|
|
llvm::Intrinsic::ID IntrinsicFunc) {
|
|
|
|
Function *Fn = M.getFunction(OldFunc);
|
|
|
|
|
|
|
|
if (!Fn)
|
2019-08-09 01:33:17 +02:00
|
|
|
return;
|
2019-08-08 18:59:31 +02:00
|
|
|
|
|
|
|
Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
|
2019-08-13 03:23:06 +02:00
|
|
|
|
|
|
|
for (auto I = Fn->user_begin(), E = Fn->user_end(); I != E;) {
|
|
|
|
CallInst *CI = dyn_cast<CallInst>(*I++);
|
|
|
|
if (!CI || CI->getCalledFunction() != Fn)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
IRBuilder<> Builder(CI->getParent(), CI->getIterator());
|
|
|
|
FunctionType *NewFuncTy = NewFn->getFunctionType();
|
|
|
|
SmallVector<Value *, 2> Args;
|
|
|
|
|
2019-10-23 16:38:52 +02:00
|
|
|
// Don't upgrade the intrinsic if it's not valid to bitcast the return
|
|
|
|
// value to the return type of the old function.
|
|
|
|
if (NewFuncTy->getReturnType() != CI->getType() &&
|
|
|
|
!CastInst::castIsValid(Instruction::BitCast, CI,
|
|
|
|
NewFuncTy->getReturnType()))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
bool InvalidCast = false;
|
|
|
|
|
2019-08-13 03:23:06 +02:00
|
|
|
for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) {
|
|
|
|
Value *Arg = CI->getArgOperand(I);
|
2019-10-23 16:38:52 +02:00
|
|
|
|
2019-08-13 03:23:06 +02:00
|
|
|
// Bitcast argument to the parameter type of the new function if it's
|
|
|
|
// not a variadic argument.
|
2019-10-23 16:38:52 +02:00
|
|
|
if (I < NewFuncTy->getNumParams()) {
|
|
|
|
// Don't upgrade the intrinsic if it's not valid to bitcast the argument
|
|
|
|
// to the parameter type of the new function.
|
|
|
|
if (!CastInst::castIsValid(Instruction::BitCast, Arg,
|
|
|
|
NewFuncTy->getParamType(I))) {
|
|
|
|
InvalidCast = true;
|
|
|
|
break;
|
|
|
|
}
|
2019-08-13 03:23:06 +02:00
|
|
|
Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
|
2019-10-23 16:38:52 +02:00
|
|
|
}
|
2019-08-13 03:23:06 +02:00
|
|
|
Args.push_back(Arg);
|
|
|
|
}
|
|
|
|
|
2019-10-23 16:38:52 +02:00
|
|
|
if (InvalidCast)
|
|
|
|
continue;
|
|
|
|
|
2019-08-13 03:23:06 +02:00
|
|
|
// Create a call instruction that calls the new function.
|
|
|
|
CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
|
|
|
|
NewCall->setTailCallKind(CI->getTailCallKind());
|
|
|
|
NewCall->setName(CI->getName());
|
|
|
|
|
|
|
|
// Bitcast the return value back to the type of the old call.
|
|
|
|
Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
|
|
|
|
|
|
|
|
if (!CI->use_empty())
|
|
|
|
CI->replaceAllUsesWith(NewRetVal);
|
|
|
|
CI->eraseFromParent();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Fn->use_empty())
|
|
|
|
Fn->eraseFromParent();
|
2019-08-08 18:59:31 +02:00
|
|
|
};
|
|
|
|
|
2019-08-13 03:23:06 +02:00
|
|
|
// Unconditionally convert a call to "clang.arc.use" to a call to
|
|
|
|
// "llvm.objc.clang.arc.use".
|
2019-08-09 01:33:17 +02:00
|
|
|
UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
|
2019-08-08 18:59:31 +02:00
|
|
|
|
2019-08-13 19:52:21 +02:00
|
|
|
// Upgrade the retain/release marker. If there is no need to upgrade
|
|
|
|
// the marker, that means either the module is already new enough to contain
|
|
|
|
// new intrinsics or it is not ARC. Either way, there is no need to upgrade
// the runtime calls.
|
|
|
|
if (!UpgradeRetainReleaseMarker(M))
|
2019-08-09 01:33:17 +02:00
|
|
|
return;
|
2019-08-08 18:59:31 +02:00
|
|
|
|
|
|
|
std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
|
|
|
|
{"objc_autorelease", llvm::Intrinsic::objc_autorelease},
|
|
|
|
{"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
|
|
|
|
{"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
|
|
|
|
{"objc_autoreleaseReturnValue",
|
|
|
|
llvm::Intrinsic::objc_autoreleaseReturnValue},
|
|
|
|
{"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
|
|
|
|
{"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
|
|
|
|
{"objc_initWeak", llvm::Intrinsic::objc_initWeak},
|
|
|
|
{"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
|
|
|
|
{"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
|
|
|
|
{"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
|
|
|
|
{"objc_release", llvm::Intrinsic::objc_release},
|
|
|
|
{"objc_retain", llvm::Intrinsic::objc_retain},
|
|
|
|
{"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
|
|
|
|
{"objc_retainAutoreleaseReturnValue",
|
|
|
|
llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
|
|
|
|
{"objc_retainAutoreleasedReturnValue",
|
|
|
|
llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
|
|
|
|
{"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
|
|
|
|
{"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
|
|
|
|
{"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
|
|
|
|
{"objc_unsafeClaimAutoreleasedReturnValue",
|
|
|
|
llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
|
|
|
|
{"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
|
|
|
|
{"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
|
|
|
|
{"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
|
|
|
|
{"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
|
|
|
|
{"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
|
|
|
|
{"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
|
|
|
|
{"objc_arc_annotation_topdown_bbstart",
|
|
|
|
llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
|
|
|
|
{"objc_arc_annotation_topdown_bbend",
|
|
|
|
llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
|
|
|
|
{"objc_arc_annotation_bottomup_bbstart",
|
|
|
|
llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
|
|
|
|
{"objc_arc_annotation_bottomup_bbend",
|
|
|
|
llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
|
|
|
|
|
|
|
|
for (auto &I : RuntimeFuncs)
|
2019-08-09 01:33:17 +02:00
|
|
|
UpgradeToIntrinsic(I.first, I.second);
|
2019-08-08 18:59:31 +02:00
|
|
|
}
|
|
|
|
|
2016-05-26 01:14:48 +02:00
|
|
|
bool llvm::UpgradeModuleFlags(Module &M) {
|
2017-08-21 23:49:13 +02:00
|
|
|
NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
|
2016-05-26 01:14:48 +02:00
|
|
|
if (!ModFlags)
|
|
|
|
return false;
|
|
|
|
|
2017-08-21 23:49:13 +02:00
|
|
|
bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
|
2016-05-26 01:14:48 +02:00
|
|
|
for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
|
|
|
|
MDNode *Op = ModFlags->getOperand(I);
|
2017-08-21 23:49:13 +02:00
|
|
|
if (Op->getNumOperands() != 3)
|
2016-05-26 01:14:48 +02:00
|
|
|
continue;
|
|
|
|
MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
|
|
|
|
if (!ID)
|
|
|
|
continue;
|
|
|
|
if (ID->getString() == "Objective-C Image Info Version")
|
|
|
|
HasObjCFlag = true;
|
|
|
|
if (ID->getString() == "Objective-C Class Properties")
|
|
|
|
HasClassProperties = true;
|
2017-08-21 23:49:13 +02:00
|
|
|
// Upgrade PIC/PIE Module Flags. The module flag behavior for these two
|
|
|
|
// flags was Error and is now Max.
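// An illustrative sketch (flag value assumed):
//   !{i32 1, !"PIC Level", i32 2}   ; behavior Error (1)
// becomes
//   !{i32 7, !"PIC Level", i32 2}   ; behavior Max (7)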
|
|
|
|
if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
|
|
|
|
if (auto *Behavior =
|
|
|
|
mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
|
|
|
|
if (Behavior->getLimitedValue() == Module::Error) {
|
|
|
|
Type *Int32Ty = Type::getInt32Ty(M.getContext());
|
|
|
|
Metadata *Ops[3] = {
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
|
|
|
|
MDString::get(M.getContext(), ID->getString()),
|
|
|
|
Op->getOperand(2)};
|
|
|
|
ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-09-15 23:12:14 +02:00
|
|
|
// Upgrade Objective-C Image Info Section. Remove the whitespace in the
|
|
|
|
// section name so that llvm-lto will not complain about mismatching
|
|
|
|
// module flags that are functionally the same.
|
|
|
|
if (ID->getString() == "Objective-C Image Info Section") {
|
|
|
|
if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
|
|
|
|
SmallVector<StringRef, 4> ValueComp;
|
|
|
|
Value->getString().split(ValueComp, " ");
|
|
|
|
if (ValueComp.size() != 1) {
|
|
|
|
std::string NewValue;
|
|
|
|
for (auto &S : ValueComp)
|
|
|
|
NewValue += S.str();
|
|
|
|
Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
|
|
|
|
MDString::get(M.getContext(), NewValue)};
|
|
|
|
ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-05-26 01:14:48 +02:00
|
|
|
}
|
2017-08-21 23:49:13 +02:00
|
|
|
|
2016-05-26 01:14:48 +02:00
|
|
|
// "Objective-C Class Properties" is recently added for Objective-C. We
|
|
|
|
// upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
|
2016-09-16 02:38:18 +02:00
|
|
|
// flag of value 0, so we can correctly downgrade this flag when trying to
|
|
|
|
// link an ObjC bitcode without this module flag with an ObjC bitcode with
|
|
|
|
// this module flag.
|
2016-05-26 01:14:48 +02:00
|
|
|
if (HasObjCFlag && !HasClassProperties) {
|
2016-09-16 02:38:18 +02:00
|
|
|
M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
|
2016-05-26 01:14:48 +02:00
|
|
|
(uint32_t)0);
|
2017-08-21 23:49:13 +02:00
|
|
|
Changed = true;
|
2016-05-26 01:14:48 +02:00
|
|
|
}
|
2017-08-21 23:49:13 +02:00
|
|
|
|
|
|
|
return Changed;
|
2016-05-26 01:14:48 +02:00
|
|
|
}
|
|
|
|
|
2017-10-06 20:06:59 +02:00
|
|
|
void llvm::UpgradeSectionAttributes(Module &M) {
|
|
|
|
auto TrimSpaces = [](StringRef Section) -> std::string {
|
|
|
|
SmallVector<StringRef, 5> Components;
|
|
|
|
Section.split(Components, ',');
|
|
|
|
|
|
|
|
SmallString<32> Buffer;
|
|
|
|
raw_svector_ostream OS(Buffer);
|
|
|
|
|
|
|
|
for (auto Component : Components)
|
|
|
|
OS << ',' << Component.trim();
|
|
|
|
|
|
|
|
return OS.str().substr(1);
|
|
|
|
};
|
|
|
|
|
|
|
|
for (auto &GV : M.globals()) {
|
|
|
|
if (!GV.hasSection())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
StringRef Section = GV.getSection();
|
|
|
|
|
|
|
|
if (!Section.startswith("__DATA, __objc_catlist"))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// e.g. "__DATA, __objc_catlist, regular, no_dead_strip" is canonicalized to
|
|
|
|
// "__DATA,__objc_catlist,regular,no_dead_strip".
|
|
|
|
GV.setSection(TrimSpaces(Section));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-25 01:56:13 +01:00
|
|
|
static bool isOldLoopArgument(Metadata *MD) {
|
|
|
|
auto *T = dyn_cast_or_null<MDTuple>(MD);
|
|
|
|
if (!T)
|
|
|
|
return false;
|
|
|
|
if (T->getNumOperands() < 1)
|
|
|
|
return false;
|
|
|
|
auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
|
|
|
|
if (!S)
|
|
|
|
return false;
|
|
|
|
return S->getString().startswith("llvm.vectorizer.");
|
|
|
|
}
|
|
|
|
|
|
|
|
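// Maps old vectorizer tags to their loop-metadata successors, e.g.
// (illustrative):
//   llvm.vectorizer.unroll -> llvm.loop.interleave.count
//   llvm.vectorizer.width  -> llvm.loop.vectorize.width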
static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
|
|
|
|
StringRef OldPrefix = "llvm.vectorizer.";
|
|
|
|
assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
|
|
|
|
|
|
|
|
if (OldTag == "llvm.vectorizer.unroll")
|
|
|
|
return MDString::get(C, "llvm.loop.interleave.count");
|
|
|
|
|
|
|
|
return MDString::get(
|
|
|
|
C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
|
|
|
|
.str());
|
|
|
|
}
|
|
|
|
|
|
|
|
static Metadata *upgradeLoopArgument(Metadata *MD) {
|
|
|
|
auto *T = dyn_cast_or_null<MDTuple>(MD);
|
|
|
|
if (!T)
|
|
|
|
return MD;
|
|
|
|
if (T->getNumOperands() < 1)
|
|
|
|
return MD;
|
|
|
|
auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
|
|
|
|
if (!OldTag)
|
|
|
|
return MD;
|
|
|
|
if (!OldTag->getString().startswith("llvm.vectorizer."))
|
|
|
|
return MD;
|
|
|
|
|
|
|
|
// This has an old tag. Upgrade it.
|
|
|
|
SmallVector<Metadata *, 8> Ops;
|
|
|
|
Ops.reserve(T->getNumOperands());
|
|
|
|
Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
|
|
|
|
for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
|
|
|
|
Ops.push_back(T->getOperand(I));
|
|
|
|
|
|
|
|
return MDTuple::get(T->getContext(), Ops);
|
|
|
|
}
|
|
|
|
|
|
|
|
MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
|
|
|
|
auto *T = dyn_cast<MDTuple>(&N);
|
|
|
|
if (!T)
|
|
|
|
return &N;
|
|
|
|
|
2016-08-11 23:15:00 +02:00
|
|
|
if (none_of(T->operands(), isOldLoopArgument))
|
2016-03-25 01:56:13 +01:00
|
|
|
return &N;
|
|
|
|
|
|
|
|
SmallVector<Metadata *, 8> Ops;
|
|
|
|
Ops.reserve(T->getNumOperands());
|
|
|
|
for (Metadata *MD : T->operands())
|
|
|
|
Ops.push_back(upgradeLoopArgument(MD));
|
|
|
|
|
|
|
|
return MDTuple::get(T->getContext(), Ops);
|
2014-06-25 17:41:00 +02:00
|
|
|
}
|
2019-09-19 00:15:58 +02:00
|
|
|
|
|
|
|
std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
|
|
|
|
std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
|
|
|
|
|
|
|
|
// If X86, and the datalayout matches the expected format, add pointer size
|
|
|
|
// address spaces to the datalayout.
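// An illustrative sketch (layout string abbreviated from a typical
// x86-64 target):
//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
// becomes
//   "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"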
|
2020-01-06 19:16:28 +01:00
|
|
|
if (!Triple(TT).isX86() || DL.contains(AddrSpaces))
|
2019-09-19 00:15:58 +02:00
|
|
|
return DL;
|
|
|
|
|
|
|
|
SmallVector<StringRef, 4> Groups;
|
|
|
|
Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
|
|
|
|
if (!R.match(DL, &Groups))
|
|
|
|
return DL;
|
|
|
|
|
|
|
|
SmallString<1024> Buf;
|
|
|
|
std::string Res = (Groups[1] + AddrSpaces + Groups[3]).toStringRef(Buf).str();
|
|
|
|
return Res;
|
|
|
|
}
|
2019-12-25 03:12:15 +01:00
|
|
|
|
|
|
|
void llvm::UpgradeFramePointerAttributes(AttrBuilder &B) {
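// Mapping implemented below (sketched; "non-leaf" wins over "none" when
// both old attributes are present):
//   "no-frame-pointer-elim"="true"   -> "frame-pointer"="all"
//   "no-frame-pointer-elim"="false"  -> "frame-pointer"="none"
//   "no-frame-pointer-elim-non-leaf" -> "frame-pointer"="non-leaf"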
|
|
|
|
StringRef FramePointer;
|
|
|
|
if (B.contains("no-frame-pointer-elim")) {
|
|
|
|
// The value can be "true" or "false".
|
|
|
|
for (const auto &I : B.td_attrs())
|
|
|
|
if (I.first == "no-frame-pointer-elim")
|
|
|
|
FramePointer = I.second == "true" ? "all" : "none";
|
|
|
|
B.removeAttribute("no-frame-pointer-elim");
|
|
|
|
}
|
|
|
|
if (B.contains("no-frame-pointer-elim-non-leaf")) {
|
|
|
|
// The value is ignored. "no-frame-pointer-elim"="true" takes priority.
|
|
|
|
if (FramePointer != "all")
|
|
|
|
FramePointer = "non-leaf";
|
|
|
|
B.removeAttribute("no-frame-pointer-elim-non-leaf");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!FramePointer.empty())
|
|
|
|
B.addAttribute("frame-pointer", FramePointer);
|
|
|
|
}
|