1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 19:23:23 +01:00

Fold the adjust_trampoline intrinsic into

init_trampoline.  There is now only one
trampoline intrinsic.

llvm-svn: 41841
This commit is contained in:
Duncan Sands 2007-09-11 14:10:23 +00:00
parent 8640185d45
commit c358890f73
14 changed files with 46 additions and 112 deletions

View File

@@ -200,10 +200,9 @@
<li><a href="#int_memory_barrier">'<tt>llvm.memory.barrier</tt>' Intrinsic</a></li>
</ol>
</li>
<li><a href="#int_trampoline">Trampoline Intrinsics</a>
<li><a href="#int_trampoline">Trampoline Intrinsic</a>
<ol>
<li><a href="#int_it">'<tt>llvm.init.trampoline</tt>' Intrinsic</a></li>
<li><a href="#int_at">'<tt>llvm.adjust.trampoline</tt>' Intrinsic</a></li>
</ol>
</li>
<li><a href="#int_general">General intrinsics</a>
@@ -5149,12 +5148,12 @@ declare void @llvm.memory.barrier( i1 &lt;ll&gt;, i1 &lt;ls&gt;, i1 &lt;sl&gt;,
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="int_trampoline">Trampoline Intrinsics</a>
<a name="int_trampoline">Trampoline Intrinsic</a>
</div>
<div class="doc_text">
<p>
These intrinsics make it possible to excise one parameter, marked with
This intrinsic makes it possible to excise one parameter, marked with
the <tt>nest</tt> attribute, from a function. The result is a callable
function pointer lacking the nest parameter - the caller does not need
to provide a value for it. Instead, the value to use is stored in
@@ -5168,11 +5167,10 @@ declare void @llvm.memory.barrier( i1 &lt;ll&gt;, i1 &lt;ls&gt;, i1 &lt;sl&gt;,
<tt>i32 f(i8* nest %c, i32 %x, i32 %y)</tt> then the resulting function
pointer has signature <tt>i32 (i32, i32)*</tt>. It can be created as follows:
<pre>
%tramp1 = alloca [10 x i8], align 4 ; size and alignment only correct for X86
%tramp = getelementptr [10 x i8]* %tramp1, i32 0, i32 0
call void @llvm.init.trampoline( i8* %tramp, i8* bitcast (i32 (i8* nest , i32, i32)* @f to i8*), i8* %nval )
%adj = call i8* @llvm.adjust.trampoline( i8* %tramp )
%fp = bitcast i8* %adj to i32 (i32, i32)*
%tramp = alloca [10 x i8], align 4 ; size and alignment only correct for X86
%tramp1 = getelementptr [10 x i8]* %tramp, i32 0, i32 0
%p = call i8* @llvm.init.trampoline( i8* %tramp1, i8* bitcast (i32 (i8* nest , i32, i32)* @f to i8*), i8* %nval )
%fp = bitcast i8* %p to i32 (i32, i32)*
</pre>
The call <tt>%val = call i32 %fp( i32 %x, i32 %y )</tt> is then equivalent to
<tt>%val = call i32 %f( i8* %nval, i32 %x, i32 %y )</tt>.
@@ -5186,11 +5184,12 @@ declare void @llvm.memory.barrier( i1 &lt;ll&gt;, i1 &lt;ls&gt;, i1 &lt;sl&gt;,
<div class="doc_text">
<h5>Syntax:</h5>
<pre>
declare void @llvm.init.trampoline(i8* &lt;tramp&gt;, i8* &lt;func&gt;, i8* &lt;nval&gt;)
declare i8* @llvm.init.trampoline(i8* &lt;tramp&gt;, i8* &lt;func&gt;, i8* &lt;nval&gt;)
</pre>
<h5>Overview:</h5>
<p>
This initializes the memory pointed to by <tt>tramp</tt> as a trampoline.
This fills the memory pointed to by <tt>tramp</tt> with code
and returns a function pointer suitable for executing it.
</p>
<h5>Arguments:</h5>
<p>
@@ -5205,42 +5204,18 @@ declare void @llvm.init.trampoline(i8* &lt;tramp&gt;, i8* &lt;func&gt;, i8* &lt;
<h5>Semantics:</h5>
<p>
The block of memory pointed to by <tt>tramp</tt> is filled with target
dependent code, turning it into a function.
The new function's signature is the same as that of <tt>func</tt> with
any arguments marked with the <tt>nest</tt> attribute removed. At most
one such <tt>nest</tt> argument is allowed, and it must be of pointer
type. Calling the new function is equivalent to calling <tt>func</tt>
with the same argument list, but with <tt>nval</tt> used for the missing
<tt>nest</tt> argument.
</p>
</div>
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_at">'<tt>llvm.adjust.trampoline</tt>' Intrinsic</a>
</div>
<div class="doc_text">
<h5>Syntax:</h5>
<pre>
declare i8* @llvm.adjust.trampoline(i8* &lt;tramp&gt;)
</pre>
<h5>Overview:</h5>
<p>
This intrinsic returns a function pointer suitable for executing
the trampoline code pointed to by <tt>tramp</tt>.
</p>
<h5>Arguments:</h5>
<p>
The <tt>llvm.adjust.trampoline</tt> takes one argument, a pointer to a
trampoline initialized by the
<a href="#int_it">'<tt>llvm.init.trampoline</tt>' intrinsic</a>.
</p>
<h5>Semantics:</h5>
<p>
A function pointer that can be used to execute the trampoline code in
<tt>tramp</tt> is returned. The returned value should be bitcast to an
dependent code, turning it into a function. A pointer to this function is
returned, but needs to be bitcast to an
<a href="#int_trampoline">appropriate function pointer type</a>
before being called.
before being called. The new function's signature is the same as that of
<tt>func</tt> with any arguments marked with the <tt>nest</tt> attribute
removed. At most one such <tt>nest</tt> argument is allowed, and it must be
of pointer type. Calling the new function is equivalent to calling
<tt>func</tt> with the same argument list, but with <tt>nval</tt> used for the
missing <tt>nest</tt> argument. If, after calling
<tt>llvm.init.trampoline</tt>, the memory pointed to by <tt>tramp</tt> is
modified, then the effect of any later call to the returned function pointer is
undefined.
</p>
</div>

View File

@@ -540,16 +540,13 @@ namespace ISD {
// produces a token chain as output.
DEBUG_LOC,
// ADJUST_TRAMP - This corresponds to the adjust_trampoline intrinsic.
// It takes a value as input and returns a value as output.
ADJUST_TRAMP,
// TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
// It takes as input a token chain, the pointer to the trampoline,
// the pointer to the nested function, the pointer to pass for the
// 'nest' parameter, a SRCVALUE for the trampoline and another for
// the nested function (allowing targets to access the original
// Function*). It produces a token chain as output.
// Function*). It produces the result of the intrinsic and a token
// chain as output.
TRAMPOLINE,
// BUILTIN_OP_END - This must be the last enum value in this list.

View File

@@ -243,11 +243,9 @@ def int_var_annotation : Intrinsic<[llvm_void_ty, llvm_ptr_ty, llvm_ptr_ty,
//===------------------------ Trampoline Intrinsics -----------------------===//
//
def int_init_trampoline : Intrinsic<[llvm_void_ty, llvm_ptr_ty, llvm_ptr_ty,
llvm_ptr_ty], []>,
GCCBuiltin<"__builtin_init_trampoline">;
def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty, llvm_ptr_ty], [IntrNoMem]>,
GCCBuiltin<"__builtin_adjust_trampoline">;
def int_init_trampoline : Intrinsic<[llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty,
llvm_ptr_ty], []>,
GCCBuiltin<"__builtin_init_trampoline">;
//===----------------------------------------------------------------------===//
// Target-specific intrinsics

View File

@@ -3384,21 +3384,6 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
}
break;
}
case ISD::ADJUST_TRAMP: {
Tmp1 = LegalizeOp(Node->getOperand(0));
switch (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0))) {
default: assert(0 && "This action is not supported yet!");
case TargetLowering::Custom:
Result = DAG.UpdateNodeOperands(Result, Tmp1);
Result = TLI.LowerOperation(Result, DAG);
if (Result.Val) break;
// FALL THROUGH
case TargetLowering::Expand:
Result = Tmp1;
break;
}
break;
}
case ISD::TRAMPOLINE: {
SDOperand Ops[6];
for (unsigned i = 0; i != 6; ++i)
@@ -3407,7 +3392,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
// The only option for this node is to custom lower it.
Result = TLI.LowerOperation(Result, DAG);
assert(Result.Val && "Should always custom lower!");
break;
// Since trampoline produces two values, make sure to remember that we
// legalized both of them.
Tmp1 = LegalizeOp(Result.getValue(1));
Result = LegalizeOp(Result);
AddLegalizedOperand(SDOperand(Node, 0), Result);
AddLegalizedOperand(SDOperand(Node, 1), Tmp1);
return Op.ResNo ? Tmp1 : Result;
}
}

View File

@@ -3643,8 +3643,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::DEBUG_LOC: return "debug_loc";
// Trampolines
case ISD::ADJUST_TRAMP: return "adjust_tramp";
case ISD::TRAMPOLINE: return "trampoline";
case ISD::TRAMPOLINE: return "trampoline";
case ISD::CONDCODE:
switch (cast<CondCodeSDNode>(this)->get()) {

View File

@@ -2881,12 +2881,6 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
// Discard annotate attributes
return 0;
case Intrinsic::adjust_trampoline: {
SDOperand Arg = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(ISD::ADJUST_TRAMP, TLI.getPointerTy(), Arg));
return 0;
}
case Intrinsic::init_trampoline: {
const Function *F =
cast<Function>(IntrinsicInst::StripPointerCasts(I.getOperand(2)));
@@ -2899,7 +2893,13 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
Ops[4] = DAG.getSrcValue(I.getOperand(1));
Ops[5] = DAG.getSrcValue(F);
DAG.setRoot(DAG.getNode(ISD::TRAMPOLINE, MVT::Other, Ops, 6));
SDOperand Tmp = DAG.getNode(ISD::TRAMPOLINE,
DAG.getNodeValueTypes(TLI.getPointerTy(),
MVT::Other), 2,
Ops, 6);
setValue(&I, Tmp);
DAG.setRoot(Tmp.getValue(1));
return 0;
}
}

View File

@@ -191,11 +191,6 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
if (Subtarget->isThumb())
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Custom);
else
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
// Use the default implementation.
setOperationAction(ISD::VASTART , MVT::Other, Expand);
setOperationAction(ISD::VAARG , MVT::Other, Expand);
@@ -1418,14 +1413,6 @@ SDOperand ARMTargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
return Chain;
}
SDOperand ARMTargetLowering::LowerADJUST_TRAMP(SDOperand Op,
SelectionDAG &DAG) {
// Thumb trampolines should be entered in thumb mode, so set the bottom bit
// of the address.
return DAG.getNode(ISD::OR, MVT::i32, Op.getOperand(0),
DAG.getConstant(1, MVT::i32));
}
SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Don't know how to custom lower this!"); abort();
@@ -1457,7 +1444,6 @@ SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
case ISD::FRAMEADDR: break;
case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);
case ISD::ADJUST_TRAMP: return LowerADJUST_TRAMP(Op, DAG);
}
return SDOperand();
}

View File

@@ -138,7 +138,6 @@ namespace llvm {
SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerBR_JT(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerMEMCPY(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerADJUST_TRAMP(SDOperand Op, SelectionDAG &DAG);
};
}

View File

@@ -124,9 +124,6 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM)
setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Custom);

View File

@@ -97,8 +97,6 @@ IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
setOperationAction(ISD::ROTR , MVT::i64 , Expand);
setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VAARG , MVT::Other, Custom);
setOperationAction(ISD::VASTART , MVT::Other, Custom);

View File

@@ -105,8 +105,6 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM)
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
setStackPointerRegisterToSaveRestore(Mips::SP);
computeRegisterProperties();
}

View File

@@ -171,9 +171,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);

View File

@@ -216,8 +216,6 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// VAARG needs to be lowered to not do unaligned accesses for doubles.

View File

@@ -246,9 +246,7 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
}
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
@@ -4406,7 +4404,9 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op,
OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 6, false, 1);
return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4);
SDOperand Ops[] =
{ Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
}
}