
Allow vectorization of bit intrinsics in BB Vectorizer.

This patch adds support for vectorization of bit intrinsics such as bswap, ctpop, ctlz, and cttz.

llvm-svn: 207174
Karthik Bhat 2014-04-25 03:33:48 +00:00
parent 532de088bc
commit a6070a9b75
3 changed files with 181 additions and 8 deletions
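For illustration only (this example is not part of the commit; it mirrors the shape of the new tests added below, and the function name @bswap_pair and the exact RUN invocation are assumptions), a pair of independent scalar bswap calls in one basic block can now be fused into a single <2 x i64> call:

; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -bb-vectorize -bb-vectorize-req-chain-depth=3 -instcombine -gvn -S | FileCheck %s
declare i64 @llvm.bswap.i64(i64)
define i64 @bswap_pair(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
  %X1 = sub i64 %A1, %B1   ; first chain
  %X2 = sub i64 %A2, %B2   ; second, independent chain
  %Y1 = call i64 @llvm.bswap.i64(i64 %X1)
  %Y2 = call i64 @llvm.bswap.i64(i64 %X2)
  %Z1 = add i64 %Y1, %B1
  %Z2 = add i64 %Y2, %B2
  %R = mul i64 %Z1, %Z2
  ret i64 %R
; With this patch the two scalar calls are paired into one vector call:
; CHECK: call <2 x i64> @llvm.bswap.v2i64(<2 x i64>
}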

include/llvm/Transforms/Vectorize.h

@@ -47,6 +47,9 @@ struct VectorizeConfig {
/// @brief Vectorize floating-point math intrinsics.
bool VectorizeMath;
/// @brief Vectorize bit intrinsics.
bool VectorizeBitManipulations;
/// @brief Vectorize the fused-multiply-add intrinsic.
bool VectorizeFMA;

lib/Transforms/Vectorize/BBVectorize.cpp

@@ -122,6 +122,10 @@ static cl::opt<bool>
NoMath("bb-vectorize-no-math", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize floating-point math intrinsics"));
static cl::opt<bool>
NoBitManipulation("bb-vectorize-no-bitmanip", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize BitManipulation intrinsics"));
static cl::opt<bool>
NoFMA("bb-vectorize-no-fma", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize the fused-multiply-add intrinsic"));
@@ -684,6 +688,11 @@ namespace {
case Intrinsic::floor:
case Intrinsic::fabs:
return Config.VectorizeMath;
case Intrinsic::bswap:
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
return Config.VectorizeBitManipulations;
case Intrinsic::fma:
case Intrinsic::fmuladd:
return Config.VectorizeFMA;
@@ -1088,13 +1097,14 @@ namespace {
CostSavings = ICost + JCost - VCost;
}
- // The powi intrinsic is special because only the first argument is
- // vectorized, the second arguments must be equal.
+ // The powi,ctlz,cttz intrinsics are special because only the first
+ // argument is vectorized, the second arguments must be equal.
CallInst *CI = dyn_cast<CallInst>(I);
Function *FI;
if (CI && (FI = CI->getCalledFunction())) {
Intrinsic::ID IID = (Intrinsic::ID) FI->getIntrinsicID();
- if (IID == Intrinsic::powi) {
+ if (IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+     IID == Intrinsic::cttz) {
Value *A1I = CI->getArgOperand(1),
*A1J = cast<CallInst>(J)->getArgOperand(1);
const SCEV *A1ISCEV = SE->getSCEV(A1I),
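To make the equal-second-argument restriction concrete (a sketch only, not part of the commit; the @testctlzneg test added below exercises the same case in-tree): the i1 flag of ctlz/cttz is not widened, so two calls whose flags differ cannot be paired.

declare i64 @llvm.ctlz.i64(i64, i1)
; Not pairable: only the first operand would be widened to <2 x i64>, and the
; scalar i1 is_zero_undef flags differ (true vs. false).
define i64 @ctlz_mismatched_flags(i64 %X1, i64 %X2) {
  %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
  %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
  %R = add i64 %Y1, %Y2
  ret i64 %R
}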
@@ -1118,7 +1128,8 @@ namespace {
assert(CI->getNumArgOperands() == CJ->getNumArgOperands() &&
"Intrinsic argument counts differ");
for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
- if (IID == Intrinsic::powi && i == 1)
+ if ((IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+      IID == Intrinsic::cttz) && i == 1)
Tys.push_back(CI->getArgOperand(i)->getType());
else
Tys.push_back(getVecTypeForPair(CI->getArgOperand(i)->getType(),
@@ -2773,10 +2784,11 @@ namespace {
ReplacedOperands[o] = Intrinsic::getDeclaration(M, IID, VArgType);
continue;
- } else if (IID == Intrinsic::powi && o == 1) {
- // The second argument of powi is a single integer and we've already
- // checked that both arguments are equal. As a result, we just keep
- // I's second argument.
+ } else if ((IID == Intrinsic::powi || IID == Intrinsic::ctlz ||
+             IID == Intrinsic::cttz) && o == 1) {
+ // The second argument of powi/ctlz/cttz is a single integer/constant
+ // and we've already checked that both arguments are equal.
+ // As a result, we just keep I's second argument.
ReplacedOperands[o] = I->getOperand(o);
continue;
}
@@ -3222,6 +3234,7 @@ VectorizeConfig::VectorizeConfig() {
VectorizePointers = !::NoPointers;
VectorizeCasts = !::NoCasts;
VectorizeMath = !::NoMath;
VectorizeBitManipulations = !::NoBitManipulation;
VectorizeFMA = !::NoFMA;
VectorizeSelect = !::NoSelect;
VectorizeCmp = !::NoCmp;

test/Transforms/BBVectorize/simple-int.ll

@@ -13,6 +13,10 @@ declare double @llvm.rint.f64(double)
declare double @llvm.trunc.f64(double)
declare double @llvm.floor.f64(double)
declare double @llvm.fabs.f64(double)
declare i64 @llvm.bswap.i64(i64)
declare i64 @llvm.ctpop.i64(i64)
declare i64 @llvm.ctlz.i64(i64, i1)
declare i64 @llvm.cttz.i64(i64, i1)
; Basic depth-3 chain with fma
define double @test1(double %A1, double %A2, double %B1, double %B2, double %C1, double %C2) {
@@ -333,6 +337,155 @@ define double @testfabs(double %A1, double %A2, double %B1, double %B2) {
}
; Basic depth-3 chain with bswap
define i64 @testbswap(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.bswap.i64(i64 %X1)
%Y2 = call i64 @llvm.bswap.i64(i64 %X2)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testbswap
; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
; CHECK: %Y1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %X1)
; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
; CHECK: ret i64 %R
}
; Basic depth-3 chain with ctpop
define i64 @testctpop(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.ctpop.i64(i64 %X1)
%Y2 = call i64 @llvm.ctpop.i64(i64 %X2)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testctpop
; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
; CHECK: %Y1 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %X1)
; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
; CHECK: ret i64 %R
}
; Basic depth-3 chain with ctlz
define i64 @testctlz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
%Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 true)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testctlz
; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
; CHECK: %Y1 = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %X1, i1 true)
; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
; CHECK: ret i64 %R
}
; Basic depth-3 chain with ctlz, but with mismatched i1 flags (should not vectorize)
define i64 @testctlzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
%Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testctlzneg
; CHECK: %X1 = sub i64 %A1, %B1
; CHECK: %X2 = sub i64 %A2, %B2
; CHECK: %Y1 = call i64 @llvm.ctlz.i64(i64 %X1, i1 true)
; CHECK: %Y2 = call i64 @llvm.ctlz.i64(i64 %X2, i1 false)
; CHECK: %Z1 = add i64 %Y1, %B1
; CHECK: %Z2 = add i64 %Y2, %B2
; CHECK: %R = mul i64 %Z1, %Z2
; CHECK: ret i64 %R
}
; Basic depth-3 chain with cttz
define i64 @testcttz(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
%Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 true)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testcttz
; CHECK: %X1.v.i1.1 = insertelement <2 x i64> undef, i64 %B1, i32 0
; CHECK: %X1.v.i1.2 = insertelement <2 x i64> %X1.v.i1.1, i64 %B2, i32 1
; CHECK: %X1.v.i0.1 = insertelement <2 x i64> undef, i64 %A1, i32 0
; CHECK: %X1.v.i0.2 = insertelement <2 x i64> %X1.v.i0.1, i64 %A2, i32 1
; CHECK: %X1 = sub <2 x i64> %X1.v.i0.2, %X1.v.i1.2
; CHECK: %Y1 = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %X1, i1 true)
; CHECK: %Z1 = add <2 x i64> %Y1, %X1.v.i1.2
; CHECK: %Z1.v.r1 = extractelement <2 x i64> %Z1, i32 0
; CHECK: %Z1.v.r2 = extractelement <2 x i64> %Z1, i32 1
; CHECK: %R = mul i64 %Z1.v.r1, %Z1.v.r2
; CHECK: ret i64 %R
}
; Basic depth-3 chain with cttz, but with mismatched i1 flags (should not vectorize)
define i64 @testcttzneg(i64 %A1, i64 %A2, i64 %B1, i64 %B2) {
%X1 = sub i64 %A1, %B1
%X2 = sub i64 %A2, %B2
%Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
%Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
%Z1 = add i64 %Y1, %B1
%Z2 = add i64 %Y2, %B2
%R = mul i64 %Z1, %Z2
ret i64 %R
; CHECK: @testcttzneg
; CHECK: %X1 = sub i64 %A1, %B1
; CHECK: %X2 = sub i64 %A2, %B2
; CHECK: %Y1 = call i64 @llvm.cttz.i64(i64 %X1, i1 true)
; CHECK: %Y2 = call i64 @llvm.cttz.i64(i64 %X2, i1 false)
; CHECK: %Z1 = add i64 %Y1, %B1
; CHECK: %Z2 = add i64 %Y2, %B2
; CHECK: %R = mul i64 %Z1, %Z2
; CHECK: ret i64 %R
}
; CHECK: declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
; CHECK: declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>) #0
@@ -346,4 +499,8 @@ define double @testfabs(double %A1, double %A2, double %B1, double %B2) {
; CHECK: declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #0
; CHECK: declare <2 x double> @llvm.floor.v2f64(<2 x double>) #0
; CHECK: declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #0
; CHECK: declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) #0
; CHECK: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) #0
; CHECK: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) #0
; CHECK: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1) #0
; CHECK: attributes #0 = { nounwind readnone }