//===-- IntrinsicLowering.cpp - Intrinsic Lowering default implementation -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IntrinsicLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/Support/Streams.h"
using namespace llvm;

template <class ArgIt>
static Function *EnsureFunctionExists(Module &M, const char *Name,
                                      ArgIt ArgBegin, ArgIt ArgEnd,
                                      const Type *RetTy) {
  if (Function *F = M.getNamedFunction(Name)) return F;
  // It doesn't already exist in the program, insert a new definition now.
  std::vector<const Type *> ParamTys;
  for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
    ParamTys.push_back(I->getType());
  return M.getOrInsertFunction(Name, FunctionType::get(RetTy, ParamTys, false));
}

/// ReplaceCallWith - This function is used when we want to lower an intrinsic
/// call to a call of an external function. This handles hard cases such as
/// when there was already a prototype for the external function, and if that
/// prototype doesn't match the arguments we expect to pass in.
template <class ArgIt>
static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
                                 ArgIt ArgBegin, ArgIt ArgEnd,
                                 const unsigned *castOpcodes,
                                 const Type *RetTy, Function *&FCache) {
  if (!FCache) {
    // If we haven't already looked up this function, check to see if the
    // program already contains a function with this name.
    Module *M = CI->getParent()->getParent()->getParent();
    FCache = M->getNamedFunction(NewFn);
    if (!FCache) {
      // It doesn't already exist in the program, insert a new definition now.
      std::vector<const Type *> ParamTys;
      for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
        ParamTys.push_back((*I)->getType());
      FCache = M->getOrInsertFunction(NewFn,
                                      FunctionType::get(RetTy, ParamTys, false));
    }
  }

  const FunctionType *FT = FCache->getFunctionType();
  std::vector<Value*> Operands;
  unsigned ArgNo = 0;
  for (ArgIt I = ArgBegin; I != ArgEnd && ArgNo != FT->getNumParams();
       ++I, ++ArgNo) {
    Value *Arg = *I;
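    // If this operand's type doesn't match the parameter type of the target
    // prototype, a cast is inserted below: the explicit opcode supplied in
    // castOpcodes when there is one, otherwise an inferred cast.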
    if (Arg->getType() != FT->getParamType(ArgNo))
      if (castOpcodes[ArgNo])
        Arg = CastInst::create(Instruction::CastOps(castOpcodes[ArgNo]),
                               Arg, FT->getParamType(ArgNo), Arg->getName(), CI);
      else
        Arg = CastInst::createInferredCast(Arg, FT->getParamType(ArgNo),
                                           Arg->getName(), CI);
    Operands.push_back(Arg);
  }
  // Pass nulls into any additional arguments...
  for (; ArgNo != FT->getNumParams(); ++ArgNo)
    Operands.push_back(Constant::getNullValue(FT->getParamType(ArgNo)));

  std::string Name = CI->getName(); CI->setName("");
  if (FT->getReturnType() == Type::VoidTy) Name.clear();
  CallInst *NewCI = new CallInst(FCache, Operands, Name, CI);
  if (!CI->use_empty()) {
    Value *V = NewCI;
    if (CI->getType() != NewCI->getType())
      V = CastInst::createInferredCast(NewCI, CI->getType(), Name, CI);
    CI->replaceAllUsesWith(V);
  }
  return NewCI;
}

void IntrinsicLowering::AddPrototypes(Module &M) {
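  // Walk every used external function in the module and pre-insert a libc
  // prototype for each intrinsic that LowerIntrinsicCall will turn into a
  // library call (setjmp, longjmp, abort, memcpy, memmove, memset, ...).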
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (I->isExternal() && !I->use_empty())
      switch (I->getIntrinsicID()) {
      default: break;
      case Intrinsic::setjmp:
        EnsureFunctionExists(M, "setjmp", I->arg_begin(), I->arg_end(),
                             Type::IntTy);
        break;
      case Intrinsic::longjmp:
        EnsureFunctionExists(M, "longjmp", I->arg_begin(), I->arg_end(),
                             Type::VoidTy);
        break;
      case Intrinsic::siglongjmp:
        EnsureFunctionExists(M, "abort", I->arg_end(), I->arg_end(),
                             Type::VoidTy);
        break;
      case Intrinsic::memcpy_i32:
      case Intrinsic::memcpy_i64:
        EnsureFunctionExists(M, "memcpy", I->arg_begin(), --I->arg_end(),
                             I->arg_begin()->getType());
        break;
      case Intrinsic::memmove_i32:
      case Intrinsic::memmove_i64:
        EnsureFunctionExists(M, "memmove", I->arg_begin(), --I->arg_end(),
                             I->arg_begin()->getType());
        break;
      case Intrinsic::memset_i32:
      case Intrinsic::memset_i64:
        M.getOrInsertFunction("memset", PointerType::get(Type::SByteTy),
                              PointerType::get(Type::SByteTy),
                              Type::IntTy, (--(--I->arg_end()))->getType(),
                              (Type *)0);
        break;
      case Intrinsic::isunordered_f32:
      case Intrinsic::isunordered_f64:
        EnsureFunctionExists(M, "isunordered", I->arg_begin(), I->arg_end(),
                             Type::BoolTy);
        break;
      case Intrinsic::sqrt_f32:
      case Intrinsic::sqrt_f64:
        if (I->arg_begin()->getType() == Type::FloatTy)
          EnsureFunctionExists(M, "sqrtf", I->arg_begin(), I->arg_end(),
                               Type::FloatTy);
        else
          EnsureFunctionExists(M, "sqrt", I->arg_begin(), I->arg_end(),
                               Type::DoubleTy);
        break;
      }
}

/// LowerBSWAP - Emit the code to lower bswap of V before the specified
/// instruction IP.
static Value *LowerBSWAP(Value *V, Instruction *IP) {
  assert(V->getType()->isInteger() && "Can't bswap a non-integer type!");

  unsigned BitSize = V->getType()->getPrimitiveSizeInBits();

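  // Build the byte-swapped value with explicit shifts, masks, and ors: each
  // byte of the input is shifted to its mirrored position, masked off, and
  // or'd back together.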
  switch(BitSize) {
  default: assert(0 && "Unhandled type size of value to byteswap!");
  case 16: {
    Value *Tmp1 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.2",IP);
    Value *Tmp2 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.1",IP);
    V = BinaryOperator::createOr(Tmp1, Tmp2, "bswap.i16", IP);
    break;
  }
  case 32: {
    Value *Tmp4 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,24),"bswap.4", IP);
    Value *Tmp3 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.3",IP);
    Value *Tmp2 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.2",IP);
    Value *Tmp1 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,24),"bswap.1", IP);
    Tmp3 = BinaryOperator::createAnd(Tmp3,
                                     ConstantInt::get(Type::UIntTy, 0xFF0000),
                                     "bswap.and3", IP);
    Tmp2 = BinaryOperator::createAnd(Tmp2,
                                     ConstantInt::get(Type::UIntTy, 0xFF00),
                                     "bswap.and2", IP);
    Tmp4 = BinaryOperator::createOr(Tmp4, Tmp3, "bswap.or1", IP);
    Tmp2 = BinaryOperator::createOr(Tmp2, Tmp1, "bswap.or2", IP);
    V = BinaryOperator::createOr(Tmp4, Tmp2, "bswap.i32", IP);
    break;
  }
  case 64: {
    Value *Tmp8 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,56),"bswap.8", IP);
    Value *Tmp7 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,40),"bswap.7", IP);
    Value *Tmp6 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,24),"bswap.6", IP);
    Value *Tmp5 = new ShiftInst(Instruction::Shl, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.5", IP);
    Value* Tmp4 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,8),"bswap.4", IP);
    Value* Tmp3 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,24),"bswap.3", IP);
    Value* Tmp2 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,40),"bswap.2", IP);
    Value* Tmp1 = new ShiftInst(Instruction::LShr, V,
                              ConstantInt::get(Type::UByteTy,56),"bswap.1", IP);
    Tmp7 = BinaryOperator::createAnd(Tmp7,
                                     ConstantInt::get(Type::ULongTy,
                                                      0xFF000000000000ULL),
                                     "bswap.and7", IP);
    Tmp6 = BinaryOperator::createAnd(Tmp6,
                                     ConstantInt::get(Type::ULongTy, 0xFF0000000000ULL),
                                     "bswap.and6", IP);
    Tmp5 = BinaryOperator::createAnd(Tmp5,
                                     ConstantInt::get(Type::ULongTy, 0xFF00000000ULL),
                                     "bswap.and5", IP);
    Tmp4 = BinaryOperator::createAnd(Tmp4,
                                     ConstantInt::get(Type::ULongTy, 0xFF000000ULL),
                                     "bswap.and4", IP);
    Tmp3 = BinaryOperator::createAnd(Tmp3,
                                     ConstantInt::get(Type::ULongTy, 0xFF0000ULL),
                                     "bswap.and3", IP);
    Tmp2 = BinaryOperator::createAnd(Tmp2,
                                     ConstantInt::get(Type::ULongTy, 0xFF00ULL),
                                     "bswap.and2", IP);
    Tmp8 = BinaryOperator::createOr(Tmp8, Tmp7, "bswap.or1", IP);
    Tmp6 = BinaryOperator::createOr(Tmp6, Tmp5, "bswap.or2", IP);
    Tmp4 = BinaryOperator::createOr(Tmp4, Tmp3, "bswap.or3", IP);
    Tmp2 = BinaryOperator::createOr(Tmp2, Tmp1, "bswap.or4", IP);
    Tmp8 = BinaryOperator::createOr(Tmp8, Tmp6, "bswap.or5", IP);
    Tmp4 = BinaryOperator::createOr(Tmp4, Tmp2, "bswap.or6", IP);
    V = BinaryOperator::createOr(Tmp8, Tmp4, "bswap.i64", IP);
    break;
  }
  }
  return V;
}

/// LowerCTPOP - Emit the code to lower ctpop of V before the specified
/// instruction IP.
static Value *LowerCTPOP(Value *V, Instruction *IP) {
  assert(V->getType()->isInteger() && "Can't ctpop a non-integer type!");

  static const uint64_t MaskValues[6] = {
    0x5555555555555555ULL, 0x3333333333333333ULL,
    0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
    0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
  };

  unsigned BitSize = V->getType()->getPrimitiveSizeInBits();

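  // Classic parallel bit count: on each pass, add adjacent groups of bits
  // (1, 2, 4, ... bits wide) using the mask for that group size, doubling the
  // group width until the whole value holds the population count.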
  for (unsigned i = 1, ct = 0; i != BitSize; i <<= 1, ++ct) {
    Value *MaskCst =
      ConstantExpr::getCast(ConstantInt::get(Type::ULongTy, MaskValues[ct]),
                            V->getType());
    Value *LHS = BinaryOperator::createAnd(V, MaskCst, "ctpop.and1", IP);
    Value *VShift = new ShiftInst(Instruction::LShr, V,
                             ConstantInt::get(Type::UByteTy, i), "ctpop.sh", IP);
    Value *RHS = BinaryOperator::createAnd(VShift, MaskCst, "ctpop.and2", IP);
    V = BinaryOperator::createAdd(LHS, RHS, "ctpop.step", IP);
  }

  return V;
}

/// LowerCTLZ - Emit the code to lower ctlz of V before the specified
/// instruction IP.
static Value *LowerCTLZ(Value *V, Instruction *IP) {

  unsigned BitSize = V->getType()->getPrimitiveSizeInBits();
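  // Smear the highest set bit into every lower position with a logarithmic
  // series of shifts and ors; the leading zero count is then the population
  // count of the complemented result.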
  for (unsigned i = 1; i != BitSize; i <<= 1) {
    Value *ShVal = ConstantInt::get(Type::UByteTy, i);
    ShVal = new ShiftInst(Instruction::LShr, V, ShVal, "ctlz.sh", IP);
    V = BinaryOperator::createOr(V, ShVal, "ctlz.step", IP);
  }

  V = BinaryOperator::createNot(V, "", IP);
  return LowerCTPOP(V, IP);
}

void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
  Function *Callee = CI->getCalledFunction();
  assert(Callee && "Cannot lower an indirect call!");

  switch (Callee->getIntrinsicID()) {
  case Intrinsic::not_intrinsic:
    llvm_cerr << "Cannot lower a call to a non-intrinsic function '"
              << Callee->getName() << "'!\n";
    abort();
  default:
    llvm_cerr << "Error: Code generator does not support intrinsic function '"
              << Callee->getName() << "'!\n";
    abort();

    // The setjmp/longjmp intrinsics should only exist in the code if it was
    // never optimized (ie, right out of the CFE), or if it has been hacked on
    // by the lowerinvoke pass. In both cases, the right thing to do is to
    // convert the call to an explicit setjmp or longjmp call.
  case Intrinsic::setjmp: {
    static Function *SetjmpFCache = 0;
    static const unsigned castOpcodes[] = { Instruction::BitCast };
    Value *V = ReplaceCallWith("setjmp", CI, CI->op_begin()+1, CI->op_end(),
                               castOpcodes, Type::IntTy, SetjmpFCache);
    if (CI->getType() != Type::VoidTy)
      CI->replaceAllUsesWith(V);
    break;
  }
  case Intrinsic::sigsetjmp:
    if (CI->getType() != Type::VoidTy)
      CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    break;

  case Intrinsic::longjmp: {
    static Function *LongjmpFCache = 0;
    static const unsigned castOpcodes[] =
      { Instruction::BitCast, 0 };
    ReplaceCallWith("longjmp", CI, CI->op_begin()+1, CI->op_end(),
                    castOpcodes, Type::VoidTy, LongjmpFCache);
    break;
  }

  case Intrinsic::siglongjmp: {
    // Insert the call to abort
    static Function *AbortFCache = 0;
    static const unsigned castOpcodes[] =
      { Instruction::BitCast, 0 };
    ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(),
                    castOpcodes, Type::VoidTy, AbortFCache);
    break;
  }
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    CI->replaceAllUsesWith(LowerCTPOP(CI->getOperand(1), CI));
    break;

  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    CI->replaceAllUsesWith(LowerBSWAP(CI->getOperand(1), CI));
    break;

  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    CI->replaceAllUsesWith(LowerCTLZ(CI->getOperand(1), CI));
    break;

  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64: {
    // cttz(x) -> ctpop(~X & (X-1))
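    // ~X & (X-1) has ones exactly in the trailing-zero positions of X (and is
    // all ones when X is zero), so counting its set bits yields the trailing
    // zero count.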
    Value *Src = CI->getOperand(1);
    Value *NotSrc = BinaryOperator::createNot(Src, Src->getName()+".not", CI);
    Value *SrcM1 = ConstantInt::get(Src->getType(), 1);
    SrcM1 = BinaryOperator::createSub(Src, SrcM1, "", CI);
    Src = LowerCTPOP(BinaryOperator::createAnd(NotSrc, SrcM1, "", CI), CI);
    CI->replaceAllUsesWith(Src);
    break;
  }

  case Intrinsic::stacksave:
  case Intrinsic::stackrestore: {
    static bool Warned = false;
    if (!Warned)
      llvm_cerr << "WARNING: this target does not support the llvm.stack"
                << (Callee->getIntrinsicID() == Intrinsic::stacksave ?
                    "save" : "restore") << " intrinsic.\n";
    Warned = true;
    if (Callee->getIntrinsicID() == Intrinsic::stacksave)
      CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    break;
  }

  case Intrinsic::returnaddress:
  case Intrinsic::frameaddress:
    llvm_cerr << "WARNING: this target does not support the llvm."
              << (Callee->getIntrinsicID() == Intrinsic::returnaddress ?
                  "return" : "frame") << "address intrinsic.\n";
    CI->replaceAllUsesWith(ConstantPointerNull::get(
                             cast<PointerType>(CI->getType())));
    break;

  case Intrinsic::prefetch:
    break;    // Simply strip out prefetches on unsupported architectures

  case Intrinsic::pcmarker:
    break;    // Simply strip out pcmarker on unsupported architectures
  case Intrinsic::readcyclecounter: {
    llvm_cerr << "WARNING: this target does not support the llvm.readcyclecoun"
              << "ter intrinsic.  It is being lowered to a constant 0\n";
    CI->replaceAllUsesWith(ConstantInt::get(Type::ULongTy, 0));
    break;
  }

  case Intrinsic::dbg_stoppoint:
  case Intrinsic::dbg_region_start:
  case Intrinsic::dbg_region_end:
  case Intrinsic::dbg_func_start:
  case Intrinsic::dbg_declare:
    break;    // Simply strip out debugging intrinsics

  case Intrinsic::memcpy_i32: {
    // The memcpy intrinsic takes an extra alignment argument that the memcpy
    // libc function does not.
    static unsigned opcodes[] =
      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
    // FIXME:
    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
    // else opcodes[2] = Instruction::BitCast;
    static Function *MemcpyFCache = 0;
    ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
    break;
  }
  case Intrinsic::memcpy_i64: {
    static unsigned opcodes[] =
      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
    // FIXME:
    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
    // else opcodes[2] = Instruction::Trunc;
    static Function *MemcpyFCache = 0;
    ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
    break;
  }
  case Intrinsic::memmove_i32: {
    // The memmove intrinsic takes an extra alignment argument that the memmove
    // libc function does not.
    static unsigned opcodes[] =
      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
    // FIXME:
    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
    // else opcodes[2] = Instruction::BitCast;
    static Function *MemmoveFCache = 0;
    ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
    break;
  }
  case Intrinsic::memmove_i64: {
    // The memmove intrinsic takes an extra alignment argument that the memmove
    // libc function does not.
    static const unsigned opcodes[] =
      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
    // else opcodes[2] = Instruction::Trunc;
    static Function *MemmoveFCache = 0;
    ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
    break;
  }
  case Intrinsic::memset_i32: {
    // The memset intrinsic takes an extra alignment argument that the memset
    // libc function does not.
    static const unsigned opcodes[] =
      { Instruction::BitCast, Instruction::ZExt, Instruction::ZExt, 0 };
    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
    // else opcodes[2] = Instruction::ZExt;
    static Function *MemsetFCache = 0;
    ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
    break;
  }
  case Intrinsic::memset_i64: {
    // The memset intrinsic takes an extra alignment argument that the memset
    // libc function does not.
    static const unsigned opcodes[] =
      { Instruction::BitCast, Instruction::ZExt, Instruction::Trunc, 0 };
    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
    // else opcodes[2] = Instruction::Trunc;
    static Function *MemsetFCache = 0;
    ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
    break;
  }
  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64: {
    Value *L = CI->getOperand(1);
    Value *R = CI->getOperand(2);

    Value *LIsNan = new SetCondInst(Instruction::SetNE, L, L, "LIsNan", CI);
    Value *RIsNan = new SetCondInst(Instruction::SetNE, R, R, "RIsNan", CI);
    CI->replaceAllUsesWith(
      BinaryOperator::create(Instruction::Or, LIsNan, RIsNan,
                             "isunordered", CI));
    break;
  }
  case Intrinsic::sqrt_f32: {
    static const unsigned opcodes[] = { 0 };
    static Function *sqrtfFCache = 0;
    ReplaceCallWith("sqrtf", CI, CI->op_begin()+1, CI->op_end(),
                    opcodes, Type::FloatTy, sqrtfFCache);
    break;
  }
  case Intrinsic::sqrt_f64: {
    static const unsigned opcodes[] = { 0 };
    static Function *sqrtFCache = 0;
    ReplaceCallWith("sqrt", CI, CI->op_begin()+1, CI->op_end(),
                    opcodes, Type::DoubleTy, sqrtFCache);
    break;
  }
  }

  assert(CI->use_empty() &&
         "Lowering should have eliminated any uses of the intrinsic call!");
  CI->eraseFromParent();
}