Added MemOperands to atomic operations, since atomics touch memory.
Added an abstract class, MemSDNode, for any node that has an associated MemOperand.
Changed atomic.lcs => atomic.cmp.swap, atomic.las => atomic.load.add, and atomic.lss => atomic.load.sub.

llvm-svn: 52706
This commit is contained in:
parent
45bffc6580
commit
7d89d61387
@@ -216,9 +216,18 @@
<li><a href="#int_atomics">Atomic intrinsics</a>
<ol>
<li><a href="#int_memory_barrier"><tt>llvm.memory_barrier</tt></a></li>
<li><a href="#int_atomic_lcs"><tt>llvm.atomic.lcs</tt></a></li>
<li><a href="#int_atomic_las"><tt>llvm.atomic.las</tt></a></li>
<li><a href="#int_atomic_cmp_swap"><tt>llvm.atomic.cmp.swap</tt></a></li>
<li><a href="#int_atomic_swap"><tt>llvm.atomic.swap</tt></a></li>
<li><a href="#int_atomic_load_add"><tt>llvm.atomic.load.add</tt></a></li>
<li><a href="#int_atomic_load_sub"><tt>llvm.atomic.load.sub</tt></a></li>
<li><a href="#int_atomic_load_and"><tt>llvm.atomic.load.and</tt></a></li>
<li><a href="#int_atomic_load_nand"><tt>llvm.atomic.load.nand</tt></a></li>
<li><a href="#int_atomic_load_or"><tt>llvm.atomic.load.or</tt></a></li>
<li><a href="#int_atomic_load_xor"><tt>llvm.atomic.load.xor</tt></a></li>
<li><a href="#int_atomic_load_max"><tt>llvm.atomic.load.max</tt></a></li>
<li><a href="#int_atomic_load_min"><tt>llvm.atomic.load.min</tt></a></li>
<li><a href="#int_atomic_load_umax"><tt>llvm.atomic.load.umax</tt></a></li>
<li><a href="#int_atomic_load_umin"><tt>llvm.atomic.load.umin</tt></a></li>
</ol>
</li>
<li><a href="#int_general">General intrinsics</a>
@@ -5777,19 +5786,19 @@ i1 <device> )

<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_atomic_lcs">'<tt>llvm.atomic.lcs.*</tt>' Intrinsic</a>
<a name="int_atomic_cmp_swap">'<tt>llvm.atomic.cmp.swap.*</tt>' Intrinsic</a>
</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.lcs</tt> on any
This is an overloaded intrinsic. You can use <tt>llvm.atomic.cmp.swap</tt> on any
integer bit width. Not all targets support all bit widths however.</p>

<pre>
declare i8 @llvm.atomic.lcs.i8( i8* <ptr>, i8 <cmp>, i8 <val> )
declare i16 @llvm.atomic.lcs.i16( i16* <ptr>, i16 <cmp>, i16 <val> )
declare i32 @llvm.atomic.lcs.i32( i32* <ptr>, i32 <cmp>, i32 <val> )
declare i64 @llvm.atomic.lcs.i64( i64* <ptr>, i64 <cmp>, i64 <val> )
declare i8 @llvm.atomic.cmp.swap.i8( i8* <ptr>, i8 <cmp>, i8 <val> )
declare i16 @llvm.atomic.cmp.swap.i16( i16* <ptr>, i16 <cmp>, i16 <val> )
declare i32 @llvm.atomic.cmp.swap.i32( i32* <ptr>, i32 <cmp>, i32 <val> )
declare i64 @llvm.atomic.cmp.swap.i64( i64* <ptr>, i64 <cmp>, i64 <val> )

</pre>
<h5>Overview:</h5>
@@ -5799,7 +5808,7 @@ declare i64 @llvm.atomic.lcs.i64( i64* <ptr>, i64 <cmp>, i64 <val
</p>
<h5>Arguments:</h5>
<p>
The <tt>llvm.atomic.lcs</tt> intrinsic takes three arguments. The result as
The <tt>llvm.atomic.cmp.swap</tt> intrinsic takes three arguments. The result as
well as both <tt>cmp</tt> and <tt>val</tt> must be integer values with the
same bit width. The <tt>ptr</tt> argument must be a pointer to a value of
this integer type. While any bit width integer may be used, targets may only
@@ -5821,13 +5830,13 @@ declare i64 @llvm.atomic.lcs.i64( i64* <ptr>, i64 <cmp>, i64 <val
store i32 4, %ptr

%val1 = add i32 4, 4
%result1 = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 4, %val1 )
%result1 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 4, %val1 )
<i>; yields {i32}:result1 = 4</i>
%stored1 = icmp eq i32 %result1, 4 <i>; yields {i1}:stored1 = true</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 8</i>

%val2 = add i32 1, 1
%result2 = call i32 @llvm.atomic.lcs.i32( i32* %ptr, i32 5, %val2 )
%result2 = call i32 @llvm.atomic.cmp.swap.i32( i32* %ptr, i32 5, %val2 )
<i>; yields {i32}:result2 = 8</i>
%stored2 = icmp eq i32 %result2, 5 <i>; yields {i1}:stored2 = false</i>
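For comparison with the semantics above, Intrinsics.td later in this patch binds llvm.atomic.cmp.swap to the GCC builtin __sync_val_compare_and_swap. The following is a minimal C++ sketch of the usual compare-and-swap retry loop built on that builtin; the helper name is hypothetical and not part of the patch.

#include <cstdint>

// __sync_val_compare_and_swap(ptr, cmp, val) stores val to *ptr only when
// *ptr == cmp, and in all cases returns the value *ptr held beforehand.
uint32_t atomic_increment(uint32_t *ptr) {
  uint32_t old = *ptr;
  for (;;) {
    uint32_t prev = __sync_val_compare_and_swap(ptr, old, old + 1);
    if (prev == old)
      return prev;   // the swap took effect; return the original value
    old = prev;      // another thread won the race; retry with the new value
  }
}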
@@ -5861,7 +5870,7 @@ declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )
<h5>Arguments:</h5>

<p>
The <tt>llvm.atomic.ls</tt> intrinsic takes two arguments. Both the
The <tt>llvm.atomic.swap</tt> intrinsic takes two arguments. Both the
<tt>val</tt> argument and the result must be integers of the same bit width.
The first argument, <tt>ptr</tt>, must be a pointer to a value of this
integer type. The targets may only lower integer representations they
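The same patch maps llvm.atomic.swap to the GCC builtin __sync_lock_test_and_set. A hedged C++ sketch of the unconditional-exchange behaviour follows; note that on some targets this particular builtin is only guaranteed to store the value 1, and the helper name here is illustrative only.

#include <cstdint>

// Atomically store 1 into *lock and return the previous contents; a return
// value of 0 means the caller acquired the (hypothetical) spinlock word.
uint32_t try_acquire(uint32_t *lock) {
  return __sync_lock_test_and_set(lock, 1u);
}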
@@ -5896,19 +5905,19 @@ declare i64 @llvm.atomic.swap.i64( i64* <ptr>, i64 <val> )

<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_atomic_las">'<tt>llvm.atomic.las.*</tt>' Intrinsic</a>
<a name="int_atomic_load_add">'<tt>llvm.atomic.load.add.*</tt>' Intrinsic</a>

</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.las</tt> on any
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.add</tt> on any
integer bit width. Not all targets support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.las.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.las.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.las.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.las.i64.( i64* <ptr>, i64 <delta> )
declare i8 @llvm.atomic.load.add.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.add.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.add.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.add.i64.( i64* <ptr>, i64 <delta> )

</pre>
<h5>Overview:</h5>
@@ -5935,16 +5944,235 @@ declare i64 @llvm.atomic.las.i64.( i64* <ptr>, i64 <delta> )
<pre>
%ptr = malloc i32
store i32 4, %ptr
%result1 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 4 )
%result1 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 4</i>
%result2 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 2 )
%result2 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 8</i>
%result3 = call i32 @llvm.atomic.las.i32( i32* %ptr, i32 5 )
%result3 = call i32 @llvm.atomic.load.add.i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 10</i>
%memval = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 15</i>
</pre>
</div>
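Intrinsics.td in this patch binds llvm.atomic.load.add to the GCC builtin __sync_fetch_and_add. A small, illustrative C++ counterpart of the example above follows; the function name is made up for illustration.

#include <cstdint>

// Atomically add 1 to *counter and return the value it held before the add,
// mirroring the "yields the original value" rule documented for this intrinsic.
uint32_t bump_counter(uint32_t *counter) {
  return __sync_fetch_and_add(counter, 1u);
}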
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_atomic_load_sub">'<tt>llvm.atomic.load.sub.*</tt>' Intrinsic</a>

</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
This is an overloaded intrinsic. You can use <tt>llvm.atomic.load.sub</tt> on
any integer bit width. Not all targets support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.sub.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.sub.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.sub.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.sub.i64.( i64* <ptr>, i64 <delta> )

</pre>
<h5>Overview:</h5>
<p>
This intrinsic subtracts <tt>delta</tt> from the value stored in memory at
<tt>ptr</tt>. It yields the original value at <tt>ptr</tt>.
</p>
<h5>Arguments:</h5>
<p>

The intrinsic takes two arguments, the first a pointer to an integer value
and the second an integer value. The result is also an integer value. These
integer types can have any bit width, but they must all have the same bit
width. The targets may only lower integer representations they support.
</p>
<h5>Semantics:</h5>
<p>
This intrinsic does a series of operations atomically. It first loads the
value stored at <tt>ptr</tt>, then subtracts <tt>delta</tt> and stores the
result to <tt>ptr</tt>. It yields the original value stored at <tt>ptr</tt>.
</p>

<h5>Examples:</h5>
<pre>
%ptr = malloc i32
store i32 8, %ptr
%result1 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 4 )
<i>; yields {i32}:result1 = 8</i>
%result2 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 2 )
<i>; yields {i32}:result2 = 4</i>
%result3 = call i32 @llvm.atomic.load.sub.i32( i32* %ptr, i32 5 )
<i>; yields {i32}:result3 = 2</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = -3</i>
</pre>
</div>
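The X86 changes later in this patch expand ATOMIC_LOAD_SUB into an ATOMIC_LOAD_ADD of the negated operand. A short C++ sketch of that identity, using the __sync builtins as stand-ins; the names here are illustrative only.

#include <cstdint>

// Subtracting delta atomically is the same as adding its two's-complement
// negation; unsigned wrap-around makes this well defined in C++.
uint32_t fetch_and_sub(uint32_t *ptr, uint32_t delta) {
  return __sync_fetch_and_add(ptr, 0u - delta);   // behaves like fetch-and-sub
}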
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_atomic_load_and">'<tt>llvm.atomic.load.and.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_nand">'<tt>llvm.atomic.load.nand.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_or">'<tt>llvm.atomic.load.or.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_xor">'<tt>llvm.atomic.load.xor.*</tt>' Intrinsic</a><br>

</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load.and</tt>,
<tt>llvm.atomic.load.nand</tt>, <tt>llvm.atomic.load.or</tt>, and
<tt>llvm.atomic.load.xor</tt> on any integer bit width. Not all targets
support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.and.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.and.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.and.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.and.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.or.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.or.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.or.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.or.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.nand.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.nand.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.nand.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.nand.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.xor.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.xor.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.xor.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.xor.i64.( i64* <ptr>, i64 <delta> )

</pre>
<h5>Overview:</h5>
<p>
These intrinsics apply a bitwise operation (and, nand, or, xor) of <tt>delta</tt> to
the value stored in memory at <tt>ptr</tt>. They yield the original value
at <tt>ptr</tt>.
</p>
<h5>Arguments:</h5>
<p>

These intrinsics take two arguments, the first a pointer to an integer value
and the second an integer value. The result is also an integer value. These
integer types can have any bit width, but they must all have the same bit
width. The targets may only lower integer representations they support.
</p>
<h5>Semantics:</h5>
<p>
These intrinsics do a series of operations atomically. They first load the
value stored at <tt>ptr</tt>, then apply the bitwise operation with
<tt>delta</tt> and store the result to <tt>ptr</tt>. They yield the original
value stored at <tt>ptr</tt>.
</p>

<h5>Examples:</h5>
<pre>
%ptr = malloc i32
store i32 0x0F0F, %ptr
%result0 = call i32 @llvm.atomic.load.nand.i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result0 = 0x0F0F</i>
%result1 = call i32 @llvm.atomic.load.and.i32( i32* %ptr, i32 0xFF )
<i>; yields {i32}:result1 = 0xFFFFFFF0</i>
%result2 = call i32 @llvm.atomic.load.or.i32( i32* %ptr, i32 0F )
<i>; yields {i32}:result2 = 0xF0</i>
%result3 = call i32 @llvm.atomic.load.xor.i32( i32* %ptr, i32 0F )
<i>; yields {i32}:result3 = FF</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = F0</i>
</pre>
</div>
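These four intrinsics correspond to the GCC __sync_fetch_and_* family referenced by Intrinsics.td in this patch. A brief C++ sketch of typical flag manipulation with the and/or/xor forms; the helper names are hypothetical.

#include <cstdint>

// Each call returns the value *flags held before the update.
uint32_t set_flag(uint32_t *flags, uint32_t bit)    { return __sync_fetch_and_or(flags, bit); }
uint32_t clear_flag(uint32_t *flags, uint32_t bit)  { return __sync_fetch_and_and(flags, ~bit); }
uint32_t toggle_flag(uint32_t *flags, uint32_t bit) { return __sync_fetch_and_xor(flags, bit); }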
<!-- _______________________________________________________________________ -->
<div class="doc_subsubsection">
<a name="int_atomic_load_max">'<tt>llvm.atomic.load.max.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_min">'<tt>llvm.atomic.load.min.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_umax">'<tt>llvm.atomic.load.umax.*</tt>' Intrinsic</a><br>
<a name="int_atomic_load_umin">'<tt>llvm.atomic.load.umin.*</tt>' Intrinsic</a><br>

</div>
<div class="doc_text">
<h5>Syntax:</h5>
<p>
These are overloaded intrinsics. You can use <tt>llvm.atomic.load.max</tt>,
<tt>llvm.atomic.load.min</tt>, <tt>llvm.atomic.load.umax</tt>, and
<tt>llvm.atomic.load.umin</tt> on any integer bit width. Not all targets
support all bit widths however.</p>
<pre>
declare i8 @llvm.atomic.load.max.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.max.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.max.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.max.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.min.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.min.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.min.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.min.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.umax.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.umax.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.umax.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.umax.i64.( i64* <ptr>, i64 <delta> )

</pre>

<pre>
declare i8 @llvm.atomic.load.umin.i8.( i8* <ptr>, i8 <delta> )
declare i16 @llvm.atomic.load.umin.i16.( i16* <ptr>, i16 <delta> )
declare i32 @llvm.atomic.load.umin.i32.( i32* <ptr>, i32 <delta> )
declare i64 @llvm.atomic.load.umin.i64.( i64* <ptr>, i64 <delta> )

</pre>
<h5>Overview:</h5>
<p>
These intrinsics take the signed or unsigned minimum or maximum of
<tt>delta</tt> and the value stored in memory at <tt>ptr</tt>. They yield the
original value at <tt>ptr</tt>.
</p>
<h5>Arguments:</h5>
<p>

These intrinsics take two arguments, the first a pointer to an integer value
and the second an integer value. The result is also an integer value. These
integer types can have any bit width, but they must all have the same bit
width. The targets may only lower integer representations they support.
</p>
<h5>Semantics:</h5>
<p>
These intrinsics do a series of operations atomically. They first load the
value stored at <tt>ptr</tt>, then take the signed or unsigned min or max of
<tt>delta</tt> and that value and store the result to <tt>ptr</tt>. They yield
the original value stored at <tt>ptr</tt>.
</p>

<h5>Examples:</h5>
<pre>
%ptr = malloc i32
store i32 7, %ptr
%result0 = call i32 @llvm.atomic.load.min.i32( i32* %ptr, i32 -2 )
<i>; yields {i32}:result0 = 7</i>
%result1 = call i32 @llvm.atomic.load.max.i32( i32* %ptr, i32 8 )
<i>; yields {i32}:result1 = -2</i>
%result2 = call i32 @llvm.atomic.load.umin.i32( i32* %ptr, i32 10 )
<i>; yields {i32}:result2 = 8</i>
%result3 = call i32 @llvm.atomic.load.umax.i32( i32* %ptr, i32 30 )
<i>; yields {i32}:result3 = 8</i>
%memval1 = load i32* %ptr <i>; yields {i32}:memval1 = 30</i>
</pre>
</div>
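There is no single __sync builtin for these operations; a target without a native fetch-and-max instruction can lower them with a compare-and-swap loop. The following is an illustrative C++ sketch of llvm.atomic.load.max's signed-max semantics, assuming only the __sync_val_compare_and_swap builtin; it is not how any particular backend in this patch does it.

#include <cstdint>

int32_t atomic_load_max(int32_t *ptr, int32_t delta) {
  int32_t old = *ptr;
  for (;;) {
    int32_t desired = old > delta ? old : delta;          // signed maximum
    int32_t prev = __sync_val_compare_and_swap(ptr, old, desired);
    if (prev == old)
      return prev;    // yields the original value, as specified above
    old = prev;       // lost a race with another writer; retry
  }
}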
<!-- ======================================================================= -->
<div class="doc_subsection">
@ -368,14 +368,16 @@ public:
|
||||
SDOperand SV);
|
||||
|
||||
/// getAtomic - Gets a node for an atomic op, produces result and chain, takes
|
||||
// 3 operands
|
||||
/// 3 operands
|
||||
SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr,
|
||||
SDOperand Cmp, SDOperand Swp, MVT VT);
|
||||
SDOperand Cmp, SDOperand Swp, MVT VT, const Value* PtrVal,
|
||||
unsigned Alignment=0);
|
||||
|
||||
/// getAtomic - Gets a node for an atomic op, produces result and chain, takes
|
||||
// 2 operands
|
||||
/// 2 operands
|
||||
SDOperand getAtomic(unsigned Opcode, SDOperand Chain, SDOperand Ptr,
|
||||
SDOperand Val, MVT VT);
|
||||
SDOperand Val, MVT VT, const Value* PtrVal,
|
||||
unsigned Alignment = 0);
|
||||
|
||||
/// getLoad - Loads are not normal binary operators: their result type is not
|
||||
/// determined by their operands, and they produce a value AND a token chain.
|
||||
|
@ -584,17 +584,17 @@ namespace ISD {
|
||||
// and produces an output chain.
|
||||
MEMBARRIER,
|
||||
|
||||
// Val, OUTCHAIN = ATOMIC_LCS(INCHAIN, ptr, cmp, swap)
|
||||
// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
|
||||
// this corresponds to the atomic.lcs intrinsic.
|
||||
// cmp is compared to *ptr, and if equal, swap is stored in *ptr.
|
||||
// the return is always the original value in *ptr
|
||||
ATOMIC_LCS,
|
||||
ATOMIC_CMP_SWAP,
|
||||
|
||||
// Val, OUTCHAIN = ATOMIC_LAS(INCHAIN, ptr, amt)
|
||||
// Val, OUTCHAIN = ATOMIC_LOAD_ADD(INCHAIN, ptr, amt)
|
||||
// this corresponds to the atomic.las intrinsic.
|
||||
// *ptr + amt is stored to *ptr atomically.
|
||||
// the return is always the original value in *ptr
|
||||
ATOMIC_LAS,
|
||||
ATOMIC_LOAD_ADD,
|
||||
|
||||
// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
|
||||
// this corresponds to the atomic.swap intrinsic.
|
||||
@ -602,11 +602,11 @@ namespace ISD {
|
||||
// the return is always the original value in *ptr
|
||||
ATOMIC_SWAP,
|
||||
|
||||
// Val, OUTCHAIN = ATOMIC_LSS(INCHAIN, ptr, amt)
|
||||
// Val, OUTCHAIN = ATOMIC_LOAD_SUB(INCHAIN, ptr, amt)
|
||||
// this corresponds to the atomic.lss intrinsic.
|
||||
// *ptr - amt is stored to *ptr atomically.
|
||||
// the return is always the original value in *ptr
|
||||
ATOMIC_LSS,
|
||||
ATOMIC_LOAD_SUB,
|
||||
|
||||
// Val, OUTCHAIN = ATOMIC_L[OpName]S(INCHAIN, ptr, amt)
|
||||
// this corresponds to the atomic.[OpName] intrinsic.
|
||||
@ -1437,32 +1437,122 @@ public:
|
||||
SDUse getValue() const { return Op; }
|
||||
};
|
||||
|
||||
class AtomicSDNode : public SDNode {
|
||||
/// Abstract base class for memory operations
|
||||
class MemSDNode : public SDNode {
|
||||
virtual void ANCHOR(); // Out-of-line virtual method to give class a home.
|
||||
|
||||
private:
|
||||
//! SrcValue - Memory location for alias analysis.
|
||||
const Value *SrcValue;
|
||||
|
||||
//! Alignment - Alignment of memory location in bytes.
|
||||
unsigned Alignment;
|
||||
|
||||
public:
|
||||
MemSDNode(unsigned Opc, SDVTList VTs, const Value *SrcValue,
|
||||
unsigned Alignment)
|
||||
: SDNode(Opc, VTs), SrcValue(SrcValue), Alignment(Alignment) {}
|
||||
|
||||
virtual ~MemSDNode() {}
|
||||
|
||||
/// Returns alignment and volatility of the memory access
|
||||
unsigned getAlignment() const { return Alignment; }
|
||||
virtual bool isVolatile() const = 0;
|
||||
|
||||
/// Returns the SrcValue and offset that describes the location of the access
|
||||
const Value *getSrcValue() const { return SrcValue; }
|
||||
virtual int getSrcValueOffset() const = 0;
|
||||
|
||||
/// getMemOperand - Return a MachineMemOperand object describing the memory
|
||||
/// reference performed by operation.
|
||||
virtual MachineMemOperand getMemOperand() const = 0;
|
||||
|
||||
// Methods to support isa and dyn_cast
|
||||
static bool classof(const MemSDNode *) { return true; }
|
||||
static bool classof(const SDNode *N) {
|
||||
return N->getOpcode() == ISD::LOAD ||
|
||||
N->getOpcode() == ISD::STORE ||
|
||||
N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
|
||||
N->getOpcode() == ISD::ATOMIC_SWAP ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
|
||||
}
|
||||
};
|
||||
|
||||
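With MemSDNode in place, loads, stores and all of the atomic nodes above share one interface for their memory reference. A hedged sketch of how a DAG pass might use it, assuming only the API declared in this header; the helper itself is not part of the patch.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Build a MachineMemOperand for any memory-touching node, whether it is a
// load, a store or one of the new atomic nodes, without caring which.
static MachineMemOperand describeMemRef(const SDNode *N) {
  const MemSDNode *M = cast<MemSDNode>(N);  // caller guarantees N touches memory
  return M->getMemOperand();
}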
/// Atomic operations node
|
||||
class AtomicSDNode : public MemSDNode {
|
||||
virtual void ANCHOR(); // Out-of-line virtual method to give class a home.
|
||||
SDUse Ops[4];
|
||||
MVT OrigVT;
|
||||
public:
|
||||
|
||||
public:
|
||||
// Opc: opcode for atomic
|
||||
// VTL: value type list
|
||||
// Chain: memory chain for operand
|
||||
// Ptr: address to update as a SDOperand
|
||||
// Cmp: compare value
|
||||
// Swp: swap value
|
||||
// VT: resulting value type
|
||||
// SrcVal: address to update as a Value (used for MemOperand)
|
||||
// Align: alignment of memory
|
||||
AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr,
|
||||
SDOperand Cmp, SDOperand Swp, MVT VT)
|
||||
: SDNode(Opc, VTL) {
|
||||
SDOperand Cmp, SDOperand Swp, MVT VT, const Value* SrcVal,
|
||||
unsigned Align=0)
|
||||
: MemSDNode(Opc, VTL, SrcVal, Align), OrigVT(VT) {
|
||||
Ops[0] = Chain;
|
||||
Ops[1] = Ptr;
|
||||
Ops[2] = Swp;
|
||||
Ops[3] = Cmp;
|
||||
InitOperands(Ops, 4);
|
||||
OrigVT=VT;
|
||||
}
|
||||
AtomicSDNode(unsigned Opc, SDVTList VTL, SDOperand Chain, SDOperand Ptr,
|
||||
SDOperand Val, MVT VT)
|
||||
: SDNode(Opc, VTL) {
|
||||
SDOperand Val, MVT VT, const Value* SrcVal, unsigned Align=0)
|
||||
: MemSDNode(Opc, VTL, SrcVal, Align), OrigVT(VT) {
|
||||
Ops[0] = Chain;
|
||||
Ops[1] = Ptr;
|
||||
Ops[2] = Val;
|
||||
InitOperands(Ops, 3);
|
||||
OrigVT=VT;
|
||||
}
|
||||
|
||||
MVT getVT() const { return OrigVT; }
|
||||
bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_LCS; }
|
||||
const SDOperand &getChain() const { return getOperand(0); }
|
||||
const SDOperand &getBasePtr() const { return getOperand(1); }
|
||||
const SDOperand &getVal() const { return getOperand(2); }
|
||||
|
||||
bool isCompareAndSwap() const { return getOpcode() == ISD::ATOMIC_CMP_SWAP; }
|
||||
|
||||
// Implementation for MemSDNode
|
||||
virtual int getSrcValueOffset() const { return 0; }
|
||||
virtual bool isVolatile() const { return true; }
|
||||
|
||||
/// getMemOperand - Return a MachineMemOperand object describing the memory
|
||||
/// reference performed by this atomic load/store.
|
||||
virtual MachineMemOperand getMemOperand() const;
|
||||
|
||||
// Methods to support isa and dyn_cast
|
||||
static bool classof(const AtomicSDNode *) { return true; }
|
||||
static bool classof(const SDNode *N) {
|
||||
return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
|
||||
N->getOpcode() == ISD::ATOMIC_SWAP ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
|
||||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
|
||||
}
|
||||
};
|
||||
|
||||
class StringSDNode : public SDNode {
|
||||
@ -1934,7 +2024,7 @@ public:
|
||||
|
||||
/// LSBaseSDNode - Base class for LoadSDNode and StoreSDNode
|
||||
///
|
||||
class LSBaseSDNode : public SDNode {
|
||||
class LSBaseSDNode : public MemSDNode {
|
||||
private:
|
||||
// AddrMode - unindexed, pre-indexed, post-indexed.
|
||||
ISD::MemIndexedMode AddrMode;
|
||||
@ -1942,17 +2032,12 @@ private:
|
||||
// MemoryVT - VT of in-memory value.
|
||||
MVT MemoryVT;
|
||||
|
||||
//! SrcValue - Memory location for alias analysis.
|
||||
const Value *SrcValue;
|
||||
|
||||
//! SVOffset - Memory location offset.
|
||||
//! SVOffset - Memory location offset. Note that base is defined in MemSDNode
|
||||
int SVOffset;
|
||||
|
||||
//! Alignment - Alignment of memory location in bytes.
|
||||
unsigned Alignment;
|
||||
|
||||
//! IsVolatile - True if the store is volatile.
|
||||
//! IsVolatile - True if the load/store is volatile.
|
||||
bool IsVolatile;
|
||||
|
||||
protected:
|
||||
//! Operand array for load and store
|
||||
/*!
|
||||
@ -1965,9 +2050,8 @@ public:
|
||||
LSBaseSDNode(ISD::NodeType NodeTy, SDOperand *Operands, unsigned numOperands,
|
||||
SDVTList VTs, ISD::MemIndexedMode AM, MVT VT,
|
||||
const Value *SV, int SVO, unsigned Align, bool Vol)
|
||||
: SDNode(NodeTy, VTs),
|
||||
AddrMode(AM), MemoryVT(VT),
|
||||
SrcValue(SV), SVOffset(SVO), Alignment(Align), IsVolatile(Vol) {
|
||||
: MemSDNode(NodeTy, VTs, SV, Align), AddrMode(AM), MemoryVT(VT),
|
||||
SVOffset(SVO), IsVolatile(Vol) {
|
||||
for (unsigned i = 0; i != numOperands; ++i)
|
||||
Ops[i] = Operands[i];
|
||||
InitOperands(Ops, numOperands);
|
||||
@ -1984,12 +2068,8 @@ public:
|
||||
return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
|
||||
}
|
||||
|
||||
const Value *getSrcValue() const { return SrcValue; }
|
||||
int getSrcValueOffset() const { return SVOffset; }
|
||||
unsigned getAlignment() const { return Alignment; }
|
||||
MVT getMemoryVT() const { return MemoryVT; }
|
||||
bool isVolatile() const { return IsVolatile; }
|
||||
|
||||
|
||||
ISD::MemIndexedMode getAddressingMode() const { return AddrMode; }
|
||||
|
||||
/// isIndexed - Return true if this is a pre/post inc/dec load/store.
|
||||
@ -1998,9 +2078,13 @@ public:
|
||||
/// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store.
|
||||
bool isUnindexed() const { return AddrMode == ISD::UNINDEXED; }
|
||||
|
||||
// Implementation for MemSDNode
|
||||
virtual int getSrcValueOffset() const { return SVOffset; }
|
||||
virtual bool isVolatile() const { return IsVolatile; }
|
||||
|
||||
/// getMemOperand - Return a MachineMemOperand object describing the memory
|
||||
/// reference performed by this load or store.
|
||||
MachineMemOperand getMemOperand() const;
|
||||
virtual MachineMemOperand getMemOperand() const;
|
||||
|
||||
static bool classof(const LSBaseSDNode *) { return true; }
|
||||
static bool classof(const SDNode *N) {
|
||||
|
@ -270,26 +270,26 @@ def int_init_trampoline : Intrinsic<[llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty,
|
||||
def int_memory_barrier : Intrinsic<[llvm_void_ty, llvm_i1_ty, llvm_i1_ty,
|
||||
llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>;
|
||||
|
||||
def int_atomic_lcs : Intrinsic<[llvm_anyint_ty,
|
||||
def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>, LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
GCCBuiltin<"__sync_val_compare_and_swap">;
|
||||
def int_atomic_las : Intrinsic<[llvm_anyint_ty,
|
||||
GCCBuiltin<"__sync_val_compare_and_swap">;
|
||||
def int_atomic_load_add : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
GCCBuiltin<"__sync_fetch_and_add">;
|
||||
def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
|
||||
GCCBuiltin<"__sync_fetch_and_add">;
|
||||
def int_atomic_swap : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
GCCBuiltin<"__sync_lock_test_and_set">;
|
||||
def int_atomic_lss : Intrinsic<[llvm_anyint_ty,
|
||||
GCCBuiltin<"__sync_lock_test_and_set">;
|
||||
def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
GCCBuiltin<"__sync_fetch_and_sub">;
|
||||
GCCBuiltin<"__sync_fetch_and_sub">;
|
||||
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>],
|
||||
@ -300,7 +300,7 @@ def int_atomic_load_or : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
GCCBuiltin<"__sync_fetch_and_or">;
|
||||
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty,
|
||||
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty,
|
||||
LLVMPointerType<LLVMMatchType<0>>,
|
||||
LLVMMatchType<0>],
|
||||
[IntrWriteArgMem]>,
|
||||
|
File diff suppressed because it is too large
@ -354,7 +354,7 @@
|
||||
|
||||
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
|
||||
typedef union YYSTYPE
|
||||
#line 953 "/Users/gohman/LLVM/llvm/lib/AsmParser/llvmAsmParser.y"
|
||||
#line 953 "/Volumes/Data/Code/work/llvm2/lib/AsmParser/llvmAsmParser.y"
|
||||
{
|
||||
llvm::Module *ModuleVal;
|
||||
llvm::Function *FunctionVal;
|
||||
|
@ -1538,10 +1538,10 @@ ConstVal: Types '[' ConstVector ']' { // Nonempty unsized arr
|
||||
uint64_t NumElements = ATy->getNumElements();
|
||||
|
||||
// Verify that we have the correct size...
|
||||
if (NumElements != -1 && NumElements != (int)$3->size())
|
||||
if (NumElements != uint64_t(-1) && NumElements != $3->size())
|
||||
GEN_ERROR("Type mismatch: constant sized array initialized with " +
|
||||
utostr($3->size()) + " arguments, but has size of " +
|
||||
itostr(NumElements) + "");
|
||||
utostr(NumElements) + "");
|
||||
|
||||
// Verify all elements are correct type!
|
||||
for (unsigned i = 0; i < $3->size(); i++) {
|
||||
@ -1564,9 +1564,9 @@ ConstVal: Types '[' ConstVector ']' { // Nonempty unsized arr
|
||||
(*$1)->getDescription() + "'");
|
||||
|
||||
uint64_t NumElements = ATy->getNumElements();
|
||||
if (NumElements != -1 && NumElements != 0)
|
||||
if (NumElements != uint64_t(-1) && NumElements != 0)
|
||||
GEN_ERROR("Type mismatch: constant sized array initialized with 0"
|
||||
" arguments, but has size of " + itostr(NumElements) +"");
|
||||
" arguments, but has size of " + utostr(NumElements) +"");
|
||||
$$ = ConstantArray::get(ATy, std::vector<Constant*>());
|
||||
delete $1;
|
||||
CHECK_FOR_ERROR
|
||||
@ -1581,13 +1581,13 @@ ConstVal: Types '[' ConstVector ']' { // Nonempty unsized arr
|
||||
|
||||
uint64_t NumElements = ATy->getNumElements();
|
||||
const Type *ETy = ATy->getElementType();
|
||||
if (NumElements != -1 && NumElements != int($3->length()))
|
||||
if (NumElements != uint64_t(-1) && NumElements != $3->length())
|
||||
GEN_ERROR("Can't build string constant of size " +
|
||||
itostr((int)($3->length())) +
|
||||
" when array has size " + itostr(NumElements) + "");
|
||||
utostr($3->length()) +
|
||||
" when array has size " + utostr(NumElements) + "");
|
||||
std::vector<Constant*> Vals;
|
||||
if (ETy == Type::Int8Ty) {
|
||||
for (unsigned i = 0; i < $3->length(); ++i)
|
||||
for (uint64_t i = 0; i < $3->length(); ++i)
|
||||
Vals.push_back(ConstantInt::get(ETy, (*$3)[i]));
|
||||
} else {
|
||||
delete $3;
|
||||
@ -1609,10 +1609,10 @@ ConstVal: Types '[' ConstVector ']' { // Nonempty unsized arr
|
||||
unsigned NumElements = PTy->getNumElements();
|
||||
|
||||
// Verify that we have the correct size...
|
||||
if (NumElements != -1 && NumElements != (int)$3->size())
|
||||
if (NumElements != unsigned(-1) && NumElements != (unsigned)$3->size())
|
||||
GEN_ERROR("Type mismatch: constant sized packed initialized with " +
|
||||
utostr($3->size()) + " arguments, but has size of " +
|
||||
itostr(NumElements) + "");
|
||||
utostr(NumElements) + "");
|
||||
|
||||
// Verify all elements are correct type!
|
||||
for (unsigned i = 0; i < $3->size(); i++) {
|
||||
|
@ -1228,7 +1228,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
|
||||
break;
|
||||
}
|
||||
|
||||
case ISD::ATOMIC_LCS: {
|
||||
case ISD::ATOMIC_CMP_SWAP: {
|
||||
unsigned int num_operands = 4;
|
||||
assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!");
|
||||
SDOperand Ops[4];
|
||||
@ -1248,8 +1248,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
|
||||
AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1));
|
||||
return Result.getValue(Op.ResNo);
|
||||
}
|
||||
case ISD::ATOMIC_LAS:
|
||||
case ISD::ATOMIC_LSS:
|
||||
case ISD::ATOMIC_LOAD_ADD:
|
||||
case ISD::ATOMIC_LOAD_SUB:
|
||||
case ISD::ATOMIC_LOAD_AND:
|
||||
case ISD::ATOMIC_LOAD_OR:
|
||||
case ISD::ATOMIC_LOAD_XOR:
|
||||
@ -4270,18 +4270,20 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) {
|
||||
break;
|
||||
}
|
||||
|
||||
case ISD::ATOMIC_LCS: {
|
||||
case ISD::ATOMIC_CMP_SWAP: {
|
||||
AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
|
||||
Tmp2 = PromoteOp(Node->getOperand(2));
|
||||
Tmp3 = PromoteOp(Node->getOperand(3));
|
||||
Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0),
|
||||
Node->getOperand(1), Tmp2, Tmp3,
|
||||
cast<AtomicSDNode>(Node)->getVT());
|
||||
Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(),
|
||||
AtomNode->getBasePtr(), Tmp2, Tmp3,
|
||||
AtomNode->getVT(), AtomNode->getSrcValue(),
|
||||
AtomNode->getAlignment());
|
||||
// Remember that we legalized the chain.
|
||||
AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
|
||||
break;
|
||||
}
|
||||
case ISD::ATOMIC_LAS:
|
||||
case ISD::ATOMIC_LSS:
|
||||
case ISD::ATOMIC_LOAD_ADD:
|
||||
case ISD::ATOMIC_LOAD_SUB:
|
||||
case ISD::ATOMIC_LOAD_AND:
|
||||
case ISD::ATOMIC_LOAD_OR:
|
||||
case ISD::ATOMIC_LOAD_XOR:
|
||||
@ -4291,10 +4293,12 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) {
|
||||
case ISD::ATOMIC_LOAD_UMIN:
|
||||
case ISD::ATOMIC_LOAD_UMAX:
|
||||
case ISD::ATOMIC_SWAP: {
|
||||
AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
|
||||
Tmp2 = PromoteOp(Node->getOperand(2));
|
||||
Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0),
|
||||
Node->getOperand(1), Tmp2,
|
||||
cast<AtomicSDNode>(Node)->getVT());
|
||||
Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(),
|
||||
AtomNode->getBasePtr(), Tmp2,
|
||||
AtomNode->getVT(), AtomNode->getSrcValue(),
|
||||
AtomNode->getAlignment());
|
||||
// Remember that we legalized the chain.
|
||||
AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
|
||||
break;
|
||||
@ -6151,7 +6155,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){
|
||||
break;
|
||||
}
|
||||
|
||||
case ISD::ATOMIC_LCS: {
|
||||
case ISD::ATOMIC_CMP_SWAP: {
|
||||
SDOperand Tmp = TLI.LowerOperation(Op, DAG);
|
||||
assert(Tmp.Val && "Node must be custom expanded!");
|
||||
ExpandOp(Tmp.getValue(0), Lo, Hi);
|
||||
|
@ -430,7 +430,24 @@ static void AddNodeIDNode(FoldingSetNodeID &ID, SDNode *N) {
|
||||
ID.AddInteger(ST->isVolatile());
|
||||
break;
|
||||
}
|
||||
case ISD::ATOMIC_CMP_SWAP:
|
||||
case ISD::ATOMIC_LOAD_ADD:
|
||||
case ISD::ATOMIC_SWAP:
|
||||
case ISD::ATOMIC_LOAD_SUB:
|
||||
case ISD::ATOMIC_LOAD_AND:
|
||||
case ISD::ATOMIC_LOAD_OR:
|
||||
case ISD::ATOMIC_LOAD_XOR:
|
||||
case ISD::ATOMIC_LOAD_NAND:
|
||||
case ISD::ATOMIC_LOAD_MIN:
|
||||
case ISD::ATOMIC_LOAD_MAX:
|
||||
case ISD::ATOMIC_LOAD_UMIN:
|
||||
case ISD::ATOMIC_LOAD_UMAX: {
|
||||
AtomicSDNode *AT = cast<AtomicSDNode>(N);
|
||||
ID.AddInteger(AT->getAlignment());
|
||||
ID.AddInteger(AT->isVolatile());
|
||||
break;
|
||||
}
|
||||
} // end switch (N->getOpcode())
|
||||
}
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
@ -2972,8 +2989,9 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst,
|
||||
|
||||
SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
|
||||
SDOperand Ptr, SDOperand Cmp,
|
||||
SDOperand Swp, MVT VT) {
|
||||
assert(Opcode == ISD::ATOMIC_LCS && "Invalid Atomic Op");
|
||||
SDOperand Swp, MVT VT, const Value* PtrVal,
|
||||
unsigned Alignment) {
|
||||
assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
|
||||
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
|
||||
SDVTList VTs = getVTList(Cmp.getValueType(), MVT::Other);
|
||||
FoldingSetNodeID ID;
|
||||
@ -2983,7 +3001,8 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
|
||||
void* IP = 0;
|
||||
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
|
||||
return SDOperand(E, 0);
|
||||
SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, VT);
|
||||
SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, VT,
|
||||
PtrVal, Alignment);
|
||||
CSEMap.InsertNode(N, IP);
|
||||
AllNodes.push_back(N);
|
||||
return SDOperand(N, 0);
|
||||
@ -2991,8 +3010,9 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
|
||||
|
||||
SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
|
||||
SDOperand Ptr, SDOperand Val,
|
||||
MVT VT) {
|
||||
assert(( Opcode == ISD::ATOMIC_LAS || Opcode == ISD::ATOMIC_LSS
|
||||
MVT VT, const Value* PtrVal,
|
||||
unsigned Alignment) {
|
||||
assert(( Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB
|
||||
|| Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND
|
||||
|| Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR
|
||||
|| Opcode == ISD::ATOMIC_LOAD_NAND
|
||||
@ -3007,7 +3027,8 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain,
|
||||
void* IP = 0;
|
||||
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
|
||||
return SDOperand(E, 0);
|
||||
SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, VT);
|
||||
SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, VT,
|
||||
PtrVal, Alignment);
|
||||
CSEMap.InsertNode(N, IP);
|
||||
AllNodes.push_back(N);
|
||||
return SDOperand(N, 0);
|
||||
@ -4171,6 +4192,7 @@ void ExternalSymbolSDNode::ANCHOR() {}
|
||||
void CondCodeSDNode::ANCHOR() {}
|
||||
void ARG_FLAGSSDNode::ANCHOR() {}
|
||||
void VTSDNode::ANCHOR() {}
|
||||
void MemSDNode::ANCHOR() {}
|
||||
void LoadSDNode::ANCHOR() {}
|
||||
void StoreSDNode::ANCHOR() {}
|
||||
void AtomicSDNode::ANCHOR() {}
|
||||
@ -4192,6 +4214,24 @@ GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA,
|
||||
TheGlobal = const_cast<GlobalValue*>(GA);
|
||||
}
|
||||
|
||||
/// getMemOperand - Return a MachineMemOperand object describing the memory
|
||||
/// reference performed by this atomic.
|
||||
MachineMemOperand AtomicSDNode::getMemOperand() const {
|
||||
int Size = (getVT().getSizeInBits() + 7) >> 3;
|
||||
int Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
|
||||
if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
|
||||
|
||||
// Check if the atomic references a frame index
|
||||
const FrameIndexSDNode *FI =
|
||||
dyn_cast<const FrameIndexSDNode>(getBasePtr().Val);
|
||||
if (!getSrcValue() && FI)
|
||||
return MachineMemOperand(PseudoSourceValue::getFixedStack(), Flags,
|
||||
FI->getIndex(), Size, getAlignment());
|
||||
else
|
||||
return MachineMemOperand(getSrcValue(), Flags, getSrcValueOffset(),
|
||||
Size, getAlignment());
|
||||
}
|
||||
|
||||
/// getMemOperand - Return a MachineMemOperand object describing the memory
|
||||
/// reference performed by this load or store.
|
||||
MachineMemOperand LSBaseSDNode::getMemOperand() const {
|
||||
@ -4199,7 +4239,7 @@ MachineMemOperand LSBaseSDNode::getMemOperand() const {
|
||||
int Flags =
|
||||
getOpcode() == ISD::LOAD ? MachineMemOperand::MOLoad :
|
||||
MachineMemOperand::MOStore;
|
||||
if (IsVolatile) Flags |= MachineMemOperand::MOVolatile;
|
||||
if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
|
||||
|
||||
// Check if the load references a frame index, and does not have
|
||||
// an SV attached.
|
||||
@ -4207,10 +4247,10 @@ MachineMemOperand LSBaseSDNode::getMemOperand() const {
|
||||
dyn_cast<const FrameIndexSDNode>(getBasePtr().Val);
|
||||
if (!getSrcValue() && FI)
|
||||
return MachineMemOperand(PseudoSourceValue::getFixedStack(), Flags,
|
||||
FI->getIndex(), Size, Alignment);
|
||||
FI->getIndex(), Size, getAlignment());
|
||||
else
|
||||
return MachineMemOperand(getSrcValue(), Flags,
|
||||
getSrcValueOffset(), Size, Alignment);
|
||||
getSrcValueOffset(), Size, getAlignment());
|
||||
}
|
||||
|
||||
/// Profile - Gather unique data for the node.
|
||||
@ -4401,9 +4441,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
|
||||
|
||||
case ISD::PREFETCH: return "Prefetch";
|
||||
case ISD::MEMBARRIER: return "MemBarrier";
|
||||
case ISD::ATOMIC_LCS: return "AtomicLCS";
|
||||
case ISD::ATOMIC_LAS: return "AtomicLAS";
|
||||
case ISD::ATOMIC_LSS: return "AtomicLSS";
|
||||
case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
|
||||
case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
|
||||
case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
|
||||
case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
|
||||
case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
|
||||
case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
|
||||
@ -4756,7 +4796,8 @@ void SDNode::dump(const SelectionDAG *G) const {
|
||||
cerr << N->getArgFlags().getArgFlagsString();
|
||||
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
|
||||
cerr << ":" << N->getVT().getMVTString();
|
||||
} else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
|
||||
}
|
||||
else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
|
||||
const Value *SrcValue = LD->getSrcValue();
|
||||
int SrcOffset = LD->getSrcValueOffset();
|
||||
cerr << " <";
|
||||
@ -4808,6 +4849,18 @@ void SDNode::dump(const SelectionDAG *G) const {
|
||||
if (ST->isVolatile())
|
||||
cerr << " <volatile>";
|
||||
cerr << " alignment=" << ST->getAlignment();
|
||||
} else if (const AtomicSDNode* AT = dyn_cast<AtomicSDNode>(this)) {
|
||||
const Value *SrcValue = AT->getSrcValue();
|
||||
int SrcOffset = AT->getSrcValueOffset();
|
||||
cerr << " <";
|
||||
if (SrcValue)
|
||||
cerr << SrcValue;
|
||||
else
|
||||
cerr << "null";
|
||||
cerr << ":" << SrcOffset << ">";
|
||||
if (AT->isVolatile())
|
||||
cerr << " <volatile>";
|
||||
cerr << " alignment=" << AT->getAlignment();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3088,7 +3088,8 @@ SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
|
||||
SDOperand O2 = getValue(I.getOperand(2));
|
||||
SDOperand L = DAG.getAtomic(Op, Root,
|
||||
getValue(I.getOperand(1)),
|
||||
O2, O2.getValueType());
|
||||
O2, O2.getValueType(),
|
||||
I.getOperand(1));
|
||||
setValue(&I, L);
|
||||
DAG.setRoot(L.getValue(1));
|
||||
return 0;
|
||||
@ -3518,21 +3519,22 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
|
||||
DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, MVT::Other, &Ops[0], 6));
|
||||
return 0;
|
||||
}
|
||||
case Intrinsic::atomic_lcs: {
|
||||
case Intrinsic::atomic_cmp_swap: {
|
||||
SDOperand Root = getRoot();
|
||||
SDOperand O3 = getValue(I.getOperand(3));
|
||||
SDOperand L = DAG.getAtomic(ISD::ATOMIC_LCS, Root,
|
||||
SDOperand L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root,
|
||||
getValue(I.getOperand(1)),
|
||||
getValue(I.getOperand(2)),
|
||||
O3, O3.getValueType());
|
||||
O3, O3.getValueType(),
|
||||
I.getOperand(1));
|
||||
setValue(&I, L);
|
||||
DAG.setRoot(L.getValue(1));
|
||||
return 0;
|
||||
}
|
||||
case Intrinsic::atomic_las:
|
||||
return implVisitBinaryAtomic(I, ISD::ATOMIC_LAS);
|
||||
case Intrinsic::atomic_lss:
|
||||
return implVisitBinaryAtomic(I, ISD::ATOMIC_LSS);
|
||||
case Intrinsic::atomic_load_add:
|
||||
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
|
||||
case Intrinsic::atomic_load_sub:
|
||||
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
|
||||
case Intrinsic::atomic_load_and:
|
||||
return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
|
||||
case Intrinsic::atomic_load_or:
|
||||
|
@ -160,14 +160,14 @@ def MEMLABEL : PseudoInstAlpha<(outs), (ins s64imm:$i, s64imm:$j, s64imm:$k, s64
|
||||
|
||||
let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
|
||||
def CAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
|
||||
[(set GPRC:$dst, (atomic_lcs_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
|
||||
[(set GPRC:$dst, (atomic_cmp_swap_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
|
||||
def CAS64 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
|
||||
[(set GPRC:$dst, (atomic_lcs_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
|
||||
[(set GPRC:$dst, (atomic_cmp_swap_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
|
||||
|
||||
def LAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
|
||||
[(set GPRC:$dst, (atomic_las_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
|
||||
[(set GPRC:$dst, (atomic_load_add_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
|
||||
def LAS64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
|
||||
[(set GPRC:$dst, (atomic_las_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
|
||||
[(set GPRC:$dst, (atomic_load_add_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
|
||||
|
||||
def SWAP32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
|
||||
[(set GPRC:$dst, (atomic_swap_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
|
||||
|
@ -204,12 +204,12 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
|
||||
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
|
||||
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
|
||||
|
||||
setOperationAction(ISD::ATOMIC_LAS , MVT::i32 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_LCS , MVT::i32 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i32 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_SWAP , MVT::i32 , Custom);
|
||||
if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
|
||||
setOperationAction(ISD::ATOMIC_LAS , MVT::i64 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_LCS , MVT::i64 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i64 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64 , Custom);
|
||||
setOperationAction(ISD::ATOMIC_SWAP , MVT::i64 , Custom);
|
||||
}
|
||||
|
||||
@ -2721,7 +2721,7 @@ SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
|
||||
return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
|
||||
}
|
||||
|
||||
SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) {
|
||||
SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) {
|
||||
MVT VT = Op.Val->getValueType(0);
|
||||
SDOperand Chain = Op.getOperand(0);
|
||||
SDOperand Ptr = Op.getOperand(1);
|
||||
@ -2757,7 +2757,7 @@ SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) {
|
||||
OutOps, 2);
|
||||
}
|
||||
|
||||
SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) {
|
||||
SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
|
||||
MVT VT = Op.Val->getValueType(0);
|
||||
SDOperand Chain = Op.getOperand(0);
|
||||
SDOperand Ptr = Op.getOperand(1);
|
||||
@ -3942,8 +3942,8 @@ SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
|
||||
case ISD::DYNAMIC_STACKALLOC:
|
||||
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
|
||||
|
||||
case ISD::ATOMIC_LAS: return LowerAtomicLAS(Op, DAG);
|
||||
case ISD::ATOMIC_LCS: return LowerAtomicLCS(Op, DAG);
|
||||
case ISD::ATOMIC_LOAD_ADD: return LowerAtomicLOAD_ADD(Op, DAG);
|
||||
case ISD::ATOMIC_CMP_SWAP: return LowerAtomicCMP_SWAP(Op, DAG);
|
||||
case ISD::ATOMIC_SWAP: return LowerAtomicSWAP(Op, DAG);
|
||||
|
||||
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
|
||||
|
@ -366,8 +366,8 @@ namespace llvm {
|
||||
SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
|
||||
const PPCSubtarget &Subtarget);
|
||||
SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG);
|
||||
SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG);
|
||||
|
@ -220,6 +220,7 @@ def SDNPOptInFlag : SDNodeProperty; // Optionally read a flag operand
|
||||
def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
|
||||
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
|
||||
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
|
||||
def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Selection DAG Node definitions.
|
||||
@ -353,39 +354,39 @@ def membarrier : SDNode<"ISD::MEMBARRIER" , STDMemBarrier,
|
||||
[SDNPHasChain, SDNPSideEffect]>;
|
||||
|
||||
// Do not use atomic_* directly, use atomic_*_size (see below)
|
||||
def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
def atomic_lss : SDNode<"ISD::ATOMIC_LSS" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
|
||||
|
||||
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
|
||||
// and truncst (see below).
|
||||
def ld : SDNode<"ISD::LOAD" , SDTLoad,
|
||||
[SDNPHasChain, SDNPMayLoad]>;
|
||||
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
|
||||
def st : SDNode<"ISD::STORE" , SDTStore,
|
||||
[SDNPHasChain, SDNPMayStore]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
|
||||
def ist : SDNode<"ISD::STORE" , SDTIStore,
|
||||
[SDNPHasChain, SDNPMayStore]>;
|
||||
[SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
|
||||
|
||||
def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
|
||||
def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, 0, []>, []>;
|
||||
@ -764,51 +765,51 @@ def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
|
||||
}]>;
|
||||
|
||||
//Atomic patterns
|
||||
def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
|
||||
def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i8;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
|
||||
def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i16;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
|
||||
def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i32;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
|
||||
def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
|
||||
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i64;
|
||||
return false;
|
||||
}]>;
|
||||
|
||||
def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_las node:$ptr, node:$inc), [{
|
||||
def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_load_add node:$ptr, node:$inc), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i8;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_las node:$ptr, node:$inc), [{
|
||||
def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_load_add node:$ptr, node:$inc), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i16;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_las node:$ptr, node:$inc), [{
|
||||
def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_load_add node:$ptr, node:$inc), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i32;
|
||||
return false;
|
||||
}]>;
|
||||
def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_las node:$ptr, node:$inc), [{
|
||||
def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
|
||||
(atomic_load_add node:$ptr, node:$inc), [{
|
||||
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
|
||||
return V->getVT() == MVT::i64;
|
||||
return false;
|
||||
|
@ -292,11 +292,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);

// Expand certain atomics
setOperationAction(ISD::ATOMIC_LCS , MVT::i8, Custom);
setOperationAction(ISD::ATOMIC_LCS , MVT::i16, Custom);
setOperationAction(ISD::ATOMIC_LCS , MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_LCS , MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_LSS , MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i8, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i16, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB , MVT::i32, Expand);

// Use the default ISD::LOCATION, ISD::DECLARE expansion.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
@ -5655,7 +5655,7 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
return Op;
}

SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op.Val)->getVT();
unsigned Reg = 0;
unsigned size = 0;
@ -5669,7 +5669,7 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
if (Subtarget->is64Bit()) {
Reg = X86::RAX; size = 8;
} else //Should go away when LowerType stuff lands
return SDOperand(ExpandATOMIC_LCS(Op.Val, DAG), 0);
return SDOperand(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0);
break;
};
SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg,
@ -5686,9 +5686,9 @@ SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
return cpOut;
}
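The node custom-lowered above has the usual compare-and-swap contract: it yields the old contents of the memory location, and the new value is stored only when those old contents equal the comparison value. A minimal standalone sketch of that contract in plain C++ with std::atomic (illustrative only, not the LLVM code path; cmp_swap_i32 is a hypothetical helper):

// Illustrative sketch of compare-and-swap semantics; not an LLVM API.
#include <atomic>
#include <cassert>

// Returns the old value of 'loc'; stores 'val' only if the old value equals 'cmp'.
static int cmp_swap_i32(std::atomic<int> &loc, int cmp, int val) {
  int expected = cmp;
  loc.compare_exchange_strong(expected, val);
  return expected; // holds the old value whether or not the swap happened
}

int main() {
  std::atomic<int> x(7);
  assert(cmp_swap_i32(x, 7, 9) == 7 && x.load() == 9); // match: swap happens
  assert(cmp_swap_i32(x, 7, 1) == 9 && x.load() == 9); // mismatch: no swap
}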

SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op)->getVT();
assert (T == MVT::i64 && "Only know how to expand i64 CAS");
assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
SDOperand cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
DAG.getConstant(0, MVT::i32));
@ -5722,13 +5722,15 @@ SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
return DAG.getNode(ISD::MERGE_VALUES, Tys, ResultVal, cpOutH.getValue(1)).Val;
}

SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) {
MVT T = cast<AtomicSDNode>(Op)->getVT();
assert (T == MVT::i32 && "Only know how to expand i32 LSS");
assert (T == MVT::i32 && "Only know how to expand i32 Atomic Load Sub");
SDOperand negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Op->getOperand(2));
return DAG.getAtomic(ISD::ATOMIC_LAS, Op->getOperand(0),
Op->getOperand(1), negOp, T).Val;
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0),
Op->getOperand(1), negOp, T,
cast<AtomicSDNode>(Op)->getSrcValue(),
cast<AtomicSDNode>(Op)->getAlignment()).Val;
}
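The expansion above uses the identity that atomically subtracting x is the same as atomically adding 0 - x. A small standalone C++ illustration of that identity with std::atomic (illustrative only, not the LLVM code path):

// Illustrative only: an atomic "load sub" is an atomic "load add" of the negation.
#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> a(40), b(40);
  int olda = a.fetch_sub(10);     // direct atomic subtract
  int oldb = b.fetch_add(0 - 10); // same effect via an add of the negation
  assert(olda == oldb && a.load() == b.load());
}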

/// LowerOperation - Provide custom lowering hooks for some operations.
@ -5736,7 +5738,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Should not custom lower this!");
case ISD::ATOMIC_LCS: return LowerLCS(Op,DAG);
case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@ -5788,8 +5790,8 @@ SDNode *X86TargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
default: assert(0 && "Should not custom lower this!");
case ISD::FP_TO_SINT: return ExpandFP_TO_SINT(N, DAG);
case ISD::READCYCLECOUNTER: return ExpandREADCYCLECOUNTER(N, DAG);
case ISD::ATOMIC_LCS: return ExpandATOMIC_LCS(N, DAG);
case ISD::ATOMIC_LSS: return ExpandATOMIC_LSS(N,DAG);
case ISD::ATOMIC_CMP_SWAP: return ExpandATOMIC_CMP_SWAP(N, DAG);
case ISD::ATOMIC_LOAD_SUB: return ExpandATOMIC_LOAD_SUB(N,DAG);
}
}
@ -541,11 +541,11 @@ namespace llvm {
SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerLCS(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandATOMIC_LCS(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandATOMIC_LSS(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG);
SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG);

SDOperand EmitTargetCodeForMemset(SelectionDAG &DAG,
SDOperand Chain,
@ -1124,7 +1124,7 @@ def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
"lock xadd $val, $ptr",
[(set GR64:$dst, (atomic_las_64 addr:$ptr, GR64:$val))]>,
[(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>,
TB, LOCK;
def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val),
"xchg $val, $ptr",
@ -2614,19 +2614,19 @@ def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap),
let Constraints = "$val = $dst", Defs = [EFLAGS] in {
def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
"lock xadd{l}\t{$val, $ptr|$ptr, $val}",
[(set GR32:$dst, (atomic_las_32 addr:$ptr, GR32:$val))]>,
[(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>,
TB, LOCK;
def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val),
"lock xadd{w}\t{$val, $ptr|$ptr, $val}",
[(set GR16:$dst, (atomic_las_16 addr:$ptr, GR16:$val))]>,
[(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>,
TB, OpSize, LOCK;
def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val),
"lock xadd{b}\t{$val, $ptr|$ptr, $val}",
[(set GR8:$dst, (atomic_las_8 addr:$ptr, GR8:$val))]>,
[(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>,
TB, LOCK;
}

// Atomic exchange and and, or, xor
// Atomic exchange, and, or, xor
let Constraints = "$val = $dst", Defs = [EFLAGS],
usesCustomDAGSchedInserter = 1 in {
def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
@ -2639,7 +2639,7 @@ def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSUEDO!",
[(set GR32:$dst, (atomic_load_xor addr:$ptr, GR32:$val))]>;
def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val),
"#ATOMXOR32 PSUEDO!",
"#ATOMNAND32 PSUEDO!",
[(set GR32:$dst, (atomic_load_nand addr:$ptr, GR32:$val))]>;

def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val),
@ -39,6 +39,30 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
Module *M = F->getParent();
switch (Name[5]) {
default: break;
case 'a':
// This upgrades the llvm.atomic.lcs, llvm.atomic.las, and llvm.atomic.lss
// to their new function name
if (Name.compare(5,8,"atomic.l",8) == 0) {
if (Name.compare(12,3,"lcs",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.cmp.swap"+Name.substr(delim));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"las",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.load.add"+Name.substr(delim));
NewFn = F;
return true;
}
else if (Name.compare(12,3,"lss",3) == 0) {
std::string::size_type delim = Name.find('.',12);
F->setName("llvm.atomic.load.sub"+Name.substr(delim));
NewFn = F;
return true;
}
}
break;
case 'b':
// This upgrades the name of the llvm.bswap intrinsic function to only use
// a single type name for overloading. We only care about the old format
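The index arithmetic in the 'a' case above is easier to follow on a concrete string: "llvm." is 5 characters and "llvm.atomic." is 12, so Name.find('.', 12) locates the '.' in front of the type suffix and Name.substr(delim) carries that suffix over to the new name. A standalone sketch of the same mapping (illustrative only; upgradeAtomicName is a hypothetical helper, not part of LLVM):

// Illustrative rewrite of the old atomic intrinsic names to the new ones,
// using the same string offsets as the AutoUpgrade code above.
#include <iostream>
#include <string>

static std::string upgradeAtomicName(const std::string &Name) {
  if (Name.compare(5, 8, "atomic.l", 8) == 0) {
    std::string::size_type delim = Name.find('.', 12); // '.' before the type suffix
    if (Name.compare(12, 3, "lcs", 3) == 0)
      return "llvm.atomic.cmp.swap" + Name.substr(delim);
    if (Name.compare(12, 3, "las", 3) == 0)
      return "llvm.atomic.load.add" + Name.substr(delim);
    if (Name.compare(12, 3, "lss", 3) == 0)
      return "llvm.atomic.load.sub" + Name.substr(delim);
  }
  return Name; // anything else is left alone
}

int main() {
  std::cout << upgradeAtomicName("llvm.atomic.lcs.i32") << "\n"; // llvm.atomic.cmp.swap.i32
  std::cout << upgradeAtomicName("llvm.atomic.las.i64") << "\n"; // llvm.atomic.load.add.i64
  std::cout << upgradeAtomicName("llvm.atomic.lss.i8") << "\n";  // llvm.atomic.load.sub.i8
}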
@ -2,17 +2,17 @@
; RUN: llvm-as < %s | llc -march=ppc32 | grep stwcx. | count 4

define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
%tmp = call i32 @llvm.atomic.las.i32( i32* %mem, i32 %val )
%tmp = call i32 @llvm.atomic.load.add.i32( i32* %mem, i32 %val )
ret i32 %tmp
}

define i32 @exchange_and_cmp(i32* %mem) nounwind {
%tmp = call i32 @llvm.atomic.lcs.i32( i32* %mem, i32 0, i32 1 )
%tmp = call i32 @llvm.atomic.cmp.swap.i32( i32* %mem, i32 0, i32 1 )
ret i32 %tmp
}

define i16 @exchange_and_cmp16(i16* %mem) nounwind {
%tmp = call i16 @llvm.atomic.lcs.i16( i16* %mem, i16 0, i16 1 )
%tmp = call i16 @llvm.atomic.cmp.swap.i16( i16* %mem, i16 0, i16 1 )
ret i16 %tmp
}

@ -21,7 +21,7 @@ define i32 @exchange(i32* %mem, i32 %val) nounwind {
ret i32 %tmp
}

declare i32 @llvm.atomic.las.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.lcs.i32(i32*, i32, i32) nounwind
declare i16 @llvm.atomic.lcs.i16(i16*, i16, i16) nounwind
declare i32 @llvm.atomic.load.add.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.cmp.swap.i32(i32*, i32, i32) nounwind
declare i16 @llvm.atomic.cmp.swap.i16(i16*, i16, i16) nounwind
declare i32 @llvm.atomic.swap.i32(i32*, i32) nounwind
@ -2,12 +2,12 @@
; RUN: llvm-as < %s | llc -march=ppc64 | grep stdcx. | count 3

define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
%tmp = call i64 @llvm.atomic.las.i64( i64* %mem, i64 %val )
%tmp = call i64 @llvm.atomic.load.add.i64( i64* %mem, i64 %val )
ret i64 %tmp
}

define i64 @exchange_and_cmp(i64* %mem) nounwind {
%tmp = call i64 @llvm.atomic.lcs.i64( i64* %mem, i64 0, i64 1 )
%tmp = call i64 @llvm.atomic.cmp.swap.i64( i64* %mem, i64 0, i64 1 )
ret i64 %tmp
}

@ -16,6 +16,6 @@ define i64 @exchange(i64* %mem, i64 %val) nounwind {
ret i64 %tmp
}

declare i64 @llvm.atomic.las.i64(i64*, i64) nounwind
declare i64 @llvm.atomic.lcs.i64(i64*, i64, i64) nounwind
declare i64 @llvm.atomic.load.add.i64(i64*, i64) nounwind
declare i64 @llvm.atomic.cmp.swap.i64(i64*, i64, i64) nounwind
declare i64 @llvm.atomic.swap.i64(i64*, i64) nounwind
@ -29,13 +29,13 @@ entry:
store i32 3855, i32* %xort
store i32 4, i32* %temp
%tmp = load i32* %temp ; <i32> [#uses=1]
call i32 @llvm.atomic.las.i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
call i32 @llvm.atomic.load.add.i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
store i32 %0, i32* %old
call i32 @llvm.atomic.lss.i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
store i32 %1, i32* %old
call i32 @llvm.atomic.las.i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
call i32 @llvm.atomic.load.add.i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
store i32 %2, i32* %old
call i32 @llvm.atomic.lss.i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
call i32 @llvm.atomic.load.sub.i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
store i32 %3, i32* %old
call i32 @llvm.atomic.load.and.i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
store i32 %4, i32* %old
@ -63,16 +63,16 @@ entry:
call i32 @llvm.atomic.swap.i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
store i32 %15, i32* %old
%neg1 = sub i32 0, 10 ; <i32> [#uses=1]
call i32 @llvm.atomic.lcs.i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
store i32 %16, i32* %old
call i32 @llvm.atomic.lcs.i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
call i32 @llvm.atomic.cmp.swap.i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
store i32 %17, i32* %old
ret void
}

declare i32 @llvm.atomic.las.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.add.i32(i32*, i32) nounwind

declare i32 @llvm.atomic.lss.i32(i32*, i32) nounwind
declare i32 @llvm.atomic.load.sub.i32(i32*, i32) nounwind

declare i32 @llvm.atomic.load.and.i32(i32*, i32) nounwind

@ -90,4 +90,4 @@ declare i32 @llvm.atomic.load.umin.i32(i32*, i32) nounwind

declare i32 @llvm.atomic.swap.i32(i32*, i32) nounwind

declare i32 @llvm.atomic.lcs.i32(i32*, i32, i32) nounwind
declare i32 @llvm.atomic.cmp.swap.i32(i32*, i32, i32) nounwind
@ -401,6 +401,8 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
Properties |= 1 << SDNPMayLoad;
} else if (PropList[i]->getName() == "SDNPSideEffect") {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
} else {
cerr << "Unknown SD Node property '" << PropList[i]->getName()
<< "' on node '" << R->getName() << "'!\n";
@ -366,6 +366,8 @@ ComplexPattern::ComplexPattern(Record *R) {
Properties |= 1 << SDNPMayLoad;
} else if (PropList[i]->getName() == "SDNPSideEffect") {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
} else {
cerr << "Unsupported SD Node property '" << PropList[i]->getName()
<< "' on ComplexPattern '" << R->getName() << "'!\n";
@ -30,6 +30,8 @@ struct CodeGenRegister;
class CodeGenTarget;

// SelectionDAG node properties.
// SDNPMemOperand: indicates that a node touches memory and therefore must
// have an associated memory operand that describes the access.
enum SDNP {
SDNPCommutative,
SDNPAssociative,
@ -39,7 +41,8 @@ enum SDNP {
SDNPOptInFlag,
SDNPMayLoad,
SDNPMayStore,
SDNPSideEffect
SDNPSideEffect,
SDNPMemOperand
};

// ComplexPattern attributes.
@ -394,11 +394,8 @@ public:

// Save loads/stores matched by a pattern.
if (!N->isLeaf() && N->getName().empty()) {
std::string EnumName = N->getOperator()->getValueAsString("Opcode");
if (EnumName == "ISD::LOAD" ||
EnumName == "ISD::STORE") {
if (NodeHasProperty(N, SDNPMemOperand, CGP))
LSI.push_back(RootName);
}
}

bool isRoot = (P == NULL);
@ -1082,7 +1079,7 @@ public:
std::vector<std::string>::const_iterator mi, mie;
for (mi = LSI.begin(), mie = LSI.end(); mi != mie; ++mi) {
emitCode("SDOperand LSI_" + *mi + " = "
"CurDAG->getMemOperand(cast<LSBaseSDNode>(" +
"CurDAG->getMemOperand(cast<MemSDNode>(" +
*mi + ")->getMemOperand());");
if (IsVariadic)
emitCode("Ops" + utostr(OpsNo) + ".push_back(LSI_" + *mi + ");");