
Add X86FastISel support for static allocas, and references
to static allocas. As part of this change, refactor the
address mode code for loads and stores.

llvm-svn: 56066
Dan Gohman 2008-09-10 20:11:02 +00:00
parent 474ba35954
commit 3ccdde5eef
7 changed files with 122 additions and 81 deletions
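
In outline: FunctionLoweringInfo's StaticAllocaMap is threaded through createFastISel into the FastISel base class, targets gain a TargetMaterializeAlloca hook alongside a simplified TargetMaterializeConstant, and the X86 load/store code is rebuilt around a single X86SelectAddress helper. A condensed sketch of the resulting flow for a static alloca, assembled from the hunks below (simplified; error paths elided):

unsigned FastISel::getRegForValue(Value *V) {
  // ...
  if (isa<AllocaInst>(V))                 // new: route allocas to the target
    return TargetMaterializeAlloca(cast<AllocaInst>(V));
  // ...
}

unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *AI) {
  X86AddressMode AM;
  if (!X86SelectAddress(AI, AM))          // static alloca -> frame-index base
    return 0;
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(TLI.getPointerTy()));
  addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);  // LEA of the slot
  return ResultReg;
}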

View File: include/llvm/CodeGen/FastISel.h

@@ -20,10 +20,12 @@
namespace llvm {
class AllocaInst;
class ConstantFP;
class MachineBasicBlock;
class MachineConstantPool;
class MachineFunction;
class MachineFrameInfo;
class MachineRegisterInfo;
class TargetData;
class TargetInstrInfo;
@@ -40,8 +42,11 @@ protected:
DenseMap<const Value *, unsigned> LocalValueMap;
DenseMap<const Value *, unsigned> &ValueMap;
DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
DenseMap<const AllocaInst *, int> &StaticAllocaMap;
MachineFunction &MF;
MachineRegisterInfo &MRI;
MachineFrameInfo &MFI;
MachineConstantPool &MCP;
const TargetMachine &TM;
const TargetData &TD;
const TargetInstrInfo &TII;
@@ -90,7 +95,8 @@ public:
protected:
FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm);
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am);
/// FastEmit_r - This method is called by target-independent code
/// to request that an instruction with the given type and opcode
@@ -234,8 +240,13 @@ protected:
/// TargetMaterializeConstant - Emit a constant in a register using
/// target-specific logic, such as constant pool loads.
virtual unsigned TargetMaterializeConstant(Constant* C,
MachineConstantPool* MCP) {
virtual unsigned TargetMaterializeConstant(Constant* C) {
return 0;
}
/// TargetMaterializeAlloca - Emit an alloca address in a register using
/// target-specific logic.
virtual unsigned TargetMaterializeAlloca(AllocaInst* C) {
return 0;
}
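
Both hooks default to returning 0, which getRegForValue treats as "could not materialize"; fast-isel then halts on that instruction and the block falls back to SelectionDAG. A hypothetical out-of-tree target (an illustrative skeleton, not code from this commit; the real X86 overrides appear further down) would plug in like this:

class MyTargetFastISel : public FastISel {  // hypothetical subclass
public:
  virtual unsigned TargetMaterializeConstant(Constant *C) {
    // e.g. place C in the constant pool via the cached MCP member and
    // emit a load; return the result register, or 0 if unsupported.
    return 0;
  }
  virtual unsigned TargetMaterializeAlloca(AllocaInst *AI) {
    // e.g. look up AI in StaticAllocaMap and compute the slot's address;
    // returning 0 again means "not handled".
    return 0;
  }
};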

View File: include/llvm/Target/TargetLowering.h

@@ -33,6 +33,7 @@
#include <vector>
namespace llvm {
class AllocaInst;
class Function;
class FastISel;
class MachineBasicBlock;
@@ -1120,7 +1121,8 @@ public:
virtual FastISel *
createFastISel(MachineFunction &,
DenseMap<const Value *, unsigned> &,
DenseMap<const BasicBlock *, MachineBasicBlock *> &) {
DenseMap<const BasicBlock *, MachineBasicBlock *> &,
DenseMap<const AllocaInst *, int> &) {
return 0;
}

View File: lib/CodeGen/SelectionDAG/FastISel.cpp

@@ -37,14 +37,14 @@ unsigned FastISel::getRegForValue(Value *V) {
return 0;
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() > 64)
return TargetMaterializeConstant(CI,
MBB->getParent()->getConstantPool());
return TargetMaterializeConstant(CI);
// Don't cache constant materializations. To do so would require
// tracking what uses they dominate.
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<GlobalValue>(V)) {
return TargetMaterializeConstant(dyn_cast<Constant>(V),
MBB->getParent()->getConstantPool());
return TargetMaterializeConstant(cast<Constant>(V));
} else if (isa<AllocaInst>(V)) {
return TargetMaterializeAlloca(cast<AllocaInst>(V));
} else if (isa<ConstantPointerNull>(V)) {
Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
@@ -58,19 +58,16 @@ unsigned FastISel::getRegForValue(Value *V) {
uint32_t IntBitWidth = IntVT.getSizeInBits();
if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
APFloat::rmTowardZero) != APFloat::opOK)
return TargetMaterializeConstant(CF,
MBB->getParent()->getConstantPool());
return TargetMaterializeConstant(CF);
APInt IntVal(IntBitWidth, 2, x);
unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::Constant, IntVal.getZExtValue());
if (IntegerReg == 0)
return TargetMaterializeConstant(CF,
MBB->getParent()->getConstantPool());
return TargetMaterializeConstant(CF);
Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
if (Reg == 0)
return TargetMaterializeConstant(CF,
MBB->getParent()->getConstantPool());;
return TargetMaterializeConstant(CF);
}
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (!SelectOperator(CE, CE->getOpcode())) return 0;
@@ -83,8 +80,7 @@ unsigned FastISel::getRegForValue(Value *V) {
}
if (!Reg && isa<Constant>(V))
return TargetMaterializeConstant(cast<Constant>(V),
MBB->getParent()->getConstantPool());
return TargetMaterializeConstant(cast<Constant>(V));
LocalValueMap[V] = Reg;
return Reg;
@@ -416,6 +412,14 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
case Instruction::PHI:
// PHI nodes are already emitted.
return true;
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
if (StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
// Dynamic-sized alloca is not handled yet.
return false;
case Instruction::BitCast:
return SelectBitCast(I);
@@ -453,12 +457,16 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
FastISel::FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am)
: MBB(0),
ValueMap(vm),
MBBMap(bm),
StaticAllocaMap(am),
MF(mf),
MRI(MF.getRegInfo()),
MFI(*MF.getFrameInfo()),
MCP(*MF.getConstantPool()),
TM(MF.getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
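
Two changes on the target-independent side: the constructor now caches MachineFrameInfo and MachineConstantPool (so TargetMaterializeConstant no longer takes a MachineConstantPool parameter), and SelectOperator gains an Alloca case. An annotated restatement of that case (a sketch; names as in the hunk above):

case Instruction::Alloca:
  // Static alloca: FunctionLowering already reserved a fixed stack slot,
  // recorded in StaticAllocaMap, so the definition itself emits no code;
  // each use materializes the address later via TargetMaterializeAlloca.
  if (StaticAllocaMap.count(cast<AllocaInst>(I)))
    return true;
  // Dynamically sized alloca: unhandled, so bail out to SelectionDAG.
  return false;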

View File: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp

@@ -737,7 +737,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn, MachineFunction &MF) {
// FastISel doesn't support EH landing pads, which require special handling.
if (EnableFastISel && !BB->isLandingPad()) {
if (FastISel *F = TLI.createFastISel(*FuncInfo->MF, FuncInfo->ValueMap,
FuncInfo->MBBMap)) {
FuncInfo->MBBMap,
FuncInfo->StaticAllocaMap)) {
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {

View File: lib/Target/X86/X86FastISel.cpp

@@ -31,10 +31,6 @@
using namespace llvm;
class X86FastISel : public FastISel {
/// MFI - Keep track of objects allocated on the stack.
///
MachineFrameInfo *MFI;
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
@@ -53,8 +49,9 @@ class X86FastISel : public FastISel {
public:
explicit X86FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
: FastISel(mf, vm, bm), MFI(MF.getFrameInfo()) {
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am)
: FastISel(mf, vm, bm, am) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
@@ -66,10 +63,10 @@ public:
#include "X86GenFastISel.inc"
private:
bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);
bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);
bool X86FastEmitStore(MVT VT, unsigned Val,
unsigned Ptr, unsigned Offset, Value *V);
const X86AddressMode &AM);
bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
unsigned &ResultReg);
@@ -77,6 +74,8 @@ private:
bool X86SelectConstAddr(Value *V, unsigned &Op0,
bool isCall = false, bool inReg = false);
bool X86SelectAddress(Value *V, X86AddressMode &AM);
bool X86SelectLoad(Instruction *I);
bool X86SelectStore(Instruction *I);
@@ -97,7 +96,9 @@ private:
CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);
unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
unsigned TargetMaterializeConstant(Constant *C);
unsigned TargetMaterializeAlloca(AllocaInst *C);
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
@@ -151,7 +152,7 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(unsigned CC, bool isTaillCall) {
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Ptr, Value *GV,
bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
unsigned &ResultReg) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
@@ -200,12 +201,6 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Ptr, Value *GV,
}
ResultReg = createResultReg(RC);
X86AddressMode AM;
if (Ptr)
// Address is in register.
AM.Base.Reg = Ptr;
else
AM.GV = cast<GlobalValue>(GV);
addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -216,7 +211,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Ptr, Value *GV,
/// i.e. V. Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
unsigned Ptr, unsigned Offset, Value *V) {
const X86AddressMode &AM) {
// Get opcode and regclass of the output for the given store instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
@@ -263,13 +258,6 @@ X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
break;
}
X86AddressMode AM;
if (Ptr) {
// Address is in register.
AM.Base.Reg = Ptr;
AM.Disp = Offset;
} else
AM.GV = cast<GlobalValue>(V);
addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
return true;
}
@@ -331,6 +319,39 @@ bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0,
return true;
}
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
// Look past bitcasts.
if (const BitCastInst *BC = dyn_cast<BitCastInst>(V))
return X86SelectAddress(BC->getOperand(0), AM);
if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
if (SI == StaticAllocaMap.end())
return false;
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = SI->second;
} else if (unsigned Ptr = lookUpRegForValue(V)) {
AM.Base.Reg = Ptr;
} else {
// Handle constant address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
if (isa<Constant>(V) && X86SelectConstAddr(V, AM.Base.Reg)) {
if (AM.Base.Reg == 0)
AM.GV = cast<GlobalValue>(V);
} else {
AM.Base.Reg = getRegForValue(V);
if (AM.Base.Reg == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
return true;
}
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
MVT VT;
@@ -341,21 +362,11 @@ bool X86FastISel::X86SelectStore(Instruction* I) {
// Unhandled operand. Halt "fast" selection and bail.
return false;
Value *V = I->getOperand(1);
unsigned Ptr = lookUpRegForValue(V);
if (!Ptr) {
// Handle constant load address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
Ptr = getRegForValue(V);
if (Ptr == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
X86AddressMode AM;
if (!X86SelectAddress(I->getOperand(1), AM))
return false;
return X86FastEmitStore(VT, Val, Ptr, 0, V);
return X86FastEmitStore(VT, Val, AM);
}
/// X86SelectLoad - Select and emit code to implement load instructions.
@@ -365,22 +376,12 @@ bool X86FastISel::X86SelectLoad(Instruction *I) {
if (!isTypeLegal(I->getType(), TLI, VT))
return false;
Value *V = I->getOperand(0);
unsigned Ptr = lookUpRegForValue(V);
if (!Ptr) {
// Handle constant load address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
Ptr = getRegForValue(V);
if (Ptr == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
X86AddressMode AM;
if (!X86SelectAddress(I->getOperand(0), AM))
return false;
unsigned ResultReg = 0;
if (X86FastEmitLoad(VT, Ptr, V, ResultReg)) {
if (X86FastEmitLoad(VT, AM, ResultReg)) {
UpdateValueMap(I, ResultReg);
return true;
}
@@ -831,7 +832,10 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
X86FastEmitStore(ArgVT, Arg, StackPtr, LocMemOffset, NULL);
X86AddressMode AM;
AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset;
X86FastEmitStore(ArgVT, Arg, AM);
}
}
@@ -885,7 +889,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
MVT ResVT = RVLocs[0].getValVT();
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI->CreateStackObject(MemSize, MemSize);
int FI = MFI.CreateStackObject(MemSize, MemSize);
addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
@@ -938,8 +942,7 @@
return false;
}
unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
MachineConstantPool* MCP) {
unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
// Can't handle PIC-mode yet.
if (TM.getRelocationModel() == Reloc::PIC_)
return 0;
@@ -1010,15 +1013,27 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
Align = Log2_64(Align);
}
unsigned MCPOffset = MCP->getConstantPoolIndex(C, Align);
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
return ResultReg;
}
unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
X86AddressMode AM;
if (!X86SelectAddress(C, AM))
return 0;
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
return ResultReg;
}
namespace llvm {
llvm::FastISel *X86::createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
return new X86FastISel(mf, vm, bm);
DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am) {
return new X86FastISel(mf, vm, bm, am);
}
}
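
The payoff of the refactor is that loads, stores, and call-argument stores all describe memory through one X86AddressMode rather than ad-hoc Ptr/Offset/GV parameters. A sketch of the three base shapes X86SelectAddress can now produce (FI, PtrReg, ValReg, and V here are placeholder names, not identifiers from the patch):

X86AddressMode AM;

// 1. Static alloca: frame-index base, rewritten after frame layout.
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = FI;

// 2. Pointer value already (or newly) held in a virtual register.
AM.Base.Reg = PtrReg;

// 3. Global variable folded straight into the memory operand.
AM.GV = cast<GlobalValue>(V);

// Loads and stores then share one emission idiom:
//   addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);      // load
//   addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(ValReg);  // store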

View File: lib/Target/X86/X86ISelLowering.cpp

@@ -1886,8 +1886,10 @@ FastISel *
X86TargetLowering::createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *,
MachineBasicBlock *> &bm) {
return X86::createFastISel(mf, vm, bm);
MachineBasicBlock *> &bm,
DenseMap<const AllocaInst *, int> &am) {
return X86::createFastISel(mf, vm, bm, am);
}

View File: lib/Target/X86/X86ISelLowering.h

@@ -473,7 +473,8 @@ namespace llvm {
virtual FastISel *
createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &,
DenseMap<const BasicBlock *, MachineBasicBlock *> &);
DenseMap<const BasicBlock *, MachineBasicBlock *> &,
DenseMap<const AllocaInst *, int> &);
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
@@ -604,7 +605,8 @@ namespace llvm {
namespace X86 {
FastISel *createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &,
DenseMap<const BasicBlock *, MachineBasicBlock *> &);
DenseMap<const BasicBlock *, MachineBasicBlock *> &,
DenseMap<const AllocaInst *, int> &);
}
}