Representation of 'atomic load' and 'atomic store' in IR.
llvm-svn: 137170
parent 8ad37f68a2
commit 5a2d27800e
@@ -3,6 +3,7 @@
<html>
<head>
<title>LLVM Atomic Instructions and Concurrency Guide</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<link rel="stylesheet" href="llvm.css" type="text/css">
</head>
<body>

@@ -1583,8 +1583,10 @@ as if it writes to the relevant surrounding bytes.
<div class="doc_text">

<p>Atomic instructions (<a href="#i_cmpxchg"><code>cmpxchg</code></a>,
<a href="#i_atomicrmw"><code>atomicrmw</code></a>, and
<a href="#i_fence"><code>fence</code></a>) take an ordering parameter
<a href="#i_atomicrmw"><code>atomicrmw</code></a>,
<a href="#i_fence"><code>fence</code></a>,
<a href="#i_load"><code>atomic load</code></a>, and
<a href="#i_load"><code>atomic store</code></a>) take an ordering parameter
that determines which other atomic instructions on the same address they
<i>synchronize with</i>. These semantics are borrowed from Java and C++0x,
but are somewhat more colloquial. If these descriptions aren't precise enough,

@@ -1592,11 +1594,7 @@ check those specs. <a href="#i_fence"><code>fence</code></a> instructions
treat these orderings somewhat differently since they don't take an address.
See that instruction's documentation for details.</p>

<!-- FIXME Note atomic load+store here once those get added. -->

<dl>
<!-- FIXME: unordered is intended to be used for atomic load and store;
it isn't allowed for any instruction yet. -->
<dt><code>unordered</code></dt>
<dd>The set of values that can be read is governed by the happens-before
partial order. A value cannot be read unless some operation wrote it.

@@ -4572,8 +4570,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>

<h5>Syntax:</h5>
<pre>
<result> = load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
<result> = volatile load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
<result> = [volatile] load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
<result> = atomic [volatile] load <ty>* <pointer> [singlethread] <ordering>, align <alignment>
!<index> = !{ i32 1 }
</pre>

@@ -4588,6 +4586,19 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
number or order of execution of this <tt>load</tt> with other <a
href="#volatile">volatile operations</a>.</p>

<p>If the <code>load</code> is marked as <code>atomic</code>, it takes an extra
<a href="#ordering">ordering</a> and optional <code>singlethread</code>
argument. The <code>release</code> and <code>acq_rel</code> orderings are
not valid on <code>load</code> instructions. Atomic loads produce <a
href="#memorymodel">defined</a> results when they may see multiple atomic
stores. The type of the pointee must be an integer type whose bit width
is a power of two greater than or equal to eight and less than or equal
to a target-specific size limit. <code>align</code> must be explicitly
specified on atomic loads, and the load has undefined behavior if the
alignment is not set to a value which is at least the size in bytes of
the pointee. <code>!nontemporal</code> does not have any defined semantics
for atomic loads.</p>

<p>The optional constant <tt>align</tt> argument specifies the alignment of the
operation (that is, the alignment of the memory address). A value of 0 or an
omitted <tt>align</tt> argument means that the operation has the preferential
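Editorial illustration (not part of this patch): a minimal sketch of building an atomic load through the C++ constructor added to Instructions.h below, matching the textual form documented above. The pointer value Ptr (of type i32*) and the insertion block BB are assumed to exist.

  // Sketch only: creates "%val = atomic load i32* %ptr acquire, align 4"
  // using the LoadInst constructor introduced by this patch.
  LoadInst *LI = new LoadInst(Ptr, "val", /*isVolatile=*/false, /*Align=*/4,
                              Acquire, CrossThread, BB);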
@@ -4631,8 +4642,8 @@ that the invoke/unwind semantics are likely to change in future versions.</p>

<h5>Syntax:</h5>
<pre>
store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] <i>; yields {void}</i>
volatile store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] <i>; yields {void}</i>
[volatile] store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] <i>; yields {void}</i>
atomic [volatile] store <ty> <value>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> <i>; yields {void}</i>
</pre>

<h5>Overview:</h5>

@@ -4648,6 +4659,19 @@ that the invoke/unwind semantics are likely to change in future versions.</p>
order of execution of this <tt>store</tt> with other <a
href="#volatile">volatile operations</a>.</p>

<p>If the <code>store</code> is marked as <code>atomic</code>, it takes an extra
<a href="#ordering">ordering</a> and optional <code>singlethread</code>
argument. The <code>acquire</code> and <code>acq_rel</code> orderings aren't
valid on <code>store</code> instructions. Atomic loads produce <a
href="#memorymodel">defined</a> results when they may see multiple atomic
stores. The type of the pointee must be an integer type whose bit width
is a power of two greater than or equal to eight and less than or equal
to a target-specific size limit. <code>align</code> must be explicitly
specified on atomic stores, and the store has undefined behavior if the
alignment is not set to a value which is at least the size in bytes of
the pointee. <code>!nontemporal</code> does not have any defined semantics
for atomic stores.</p>

<p>The optional constant "align" argument specifies the alignment of the
operation (that is, the alignment of the memory address). A value of 0 or an
omitted "align" argument means that the operation has the preferential
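Similarly, a hedged sketch (assuming the same Ptr, a value Val, and block BB) of building an atomic store with release ordering via the new StoreInst constructor added below:

  // Sketch only: creates "atomic store i32 %val, i32* %ptr release, align 4".
  StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/false, /*Align=*/4,
                                Release, CrossThread, BB);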
@@ -4730,9 +4754,6 @@ operations and/or fences.</p>
specifies that the fence only synchronizes with other fences in the same
thread. (This is useful for interacting with signal handlers.)</p>

<p>FIXME: This instruction is a work in progress; until it is finished, use
llvm.memory.barrier.

<h5>Example:</h5>
<pre>
fence acquire <i>; yields {void}</i>

@@ -307,7 +307,11 @@ namespace bitc {
FUNC_CODE_INST_ATOMICRMW = 38, // ATOMICRMW: [ptrty,ptr,val, operation,
// align, vol,
// ordering, synchscope]
FUNC_CODE_INST_RESUME = 39 // RESUME: [opval]
FUNC_CODE_INST_RESUME = 39, // RESUME: [opval]
FUNC_CODE_INST_LOADATOMIC = 40, // LOAD: [opty, op, align, vol,
// ordering, synchscope]
FUNC_CODE_INST_STOREATOMIC = 41 // STORE: [ptrty,ptr,val, align, vol
// ordering, synchscope]
};
} // End bitc namespace
} // End llvm namespace
@@ -142,12 +142,20 @@ public:
LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
Instruction *InsertBefore = 0);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, Instruction *InsertBefore = 0);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, Instruction *InsertBefore = 0);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope = CrossThread,
Instruction *InsertBefore = 0);
LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);

LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);

@@ -171,11 +179,47 @@ public:
/// getAlignment - Return the alignment of the access that is being performed
///
unsigned getAlignment() const {
return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
}

void setAlignment(unsigned Align);

/// Returns the ordering effect of this fence.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}

/// Set the ordering constraint on this load. May not be Release or
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
}

SynchronizationScope getSynchScope() const {
return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
}

/// Specify whether this load is ordered with respect to all
/// concurrently executing threads, or only with respect to signal handlers
/// executing in the same thread.
void setSynchScope(SynchronizationScope xthread) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
(xthread << 6));
}

bool isAtomic() const { return getOrdering() != NotAtomic; }
void setAtomic(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}

bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
}

Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
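Reading aid (not part of the patch): the accessors above imply the following packing of the instruction's subclass data. The helper below is a hypothetical illustration of that layout, derived directly from the shift/mask constants in the diff.

  // bit 0    : volatile flag
  // bits 1-5 : Log2_32(alignment) + 1 (0 means no alignment recorded)
  // bit 6    : SynchronizationScope (CrossThread / SingleThread)
  // bits 7-9 : AtomicOrdering
  static unsigned packLoadStoreBits(bool Vol, unsigned Align,
                                    SynchronizationScope Scope,
                                    AtomicOrdering Order) {
    return (Vol ? 1u : 0u) | ((Log2_32(Align) + 1) << 1) |
           (unsigned(Scope) << 6) | (unsigned(Order) << 7);
  }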
@@ -222,19 +266,27 @@ public:
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
Instruction *InsertBefore = 0);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, Instruction *InsertBefore = 0);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, Instruction *InsertBefore = 0);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope = CrossThread,
Instruction *InsertBefore = 0);
StoreInst(Value *Val, Value *Ptr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd);

/// isVolatile - Return true if this is a load from a volatile memory
/// isVolatile - Return true if this is a store to a volatile memory
/// location.
///
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }

/// setVolatile - Specify whether this is a volatile load or not.
/// setVolatile - Specify whether this is a volatile store or not.
///
void setVolatile(bool V) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |

@@ -247,11 +299,47 @@ public:
/// getAlignment - Return the alignment of the access that is being performed
///
unsigned getAlignment() const {
return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
}

void setAlignment(unsigned Align);

/// Returns the ordering effect of this store.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
}

/// Set the ordering constraint on this store. May not be Acquire or
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
}

SynchronizationScope getSynchScope() const {
return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
}

/// Specify whether this store instruction is ordered with respect to all
/// concurrently executing threads, or only with respect to signal handlers
/// executing in the same thread.
void setSynchScope(SynchronizationScope xthread) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
(xthread << 6));
}

bool isAtomic() const { return getOrdering() != NotAtomic; }
void setAtomic(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
setOrdering(Ordering);
setSynchScope(SynchScope);
}

bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
}

Value *getValueOperand() { return getOperand(0); }
const Value *getValueOperand() const { return getOperand(0); }
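For context, a hypothetical use of the new query helpers in an optimization pass (LI and SI are assumed to be a LoadInst* and StoreInst* obtained elsewhere):

  // Passes that are only safe on ordinary accesses can now bail out early.
  if (!LI->isSimple() || !SI->isUnordered())
    return false; // volatile, or atomic beyond unordered: leave it alone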
@@ -319,18 +407,8 @@ public:
/// Set the ordering constraint on this fence. May only be Acquire, Release,
/// AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
switch (Ordering) {
case Acquire:
case Release:
case AcquireRelease:
case SequentiallyConsistent:
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
(Ordering << 1));
return;
default:
llvm_unreachable("FenceInst ordering must be Acquire, Release,"
" AcquireRelease, or SequentiallyConsistent");
}
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
(Ordering << 1));
}

SynchronizationScope getSynchScope() const {

@@ -555,7 +633,7 @@ public:
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != NotAtomic &&
"atomicrmw instructions can only be atomic.");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~28) |
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
(Ordering << 2));
}

@@ -569,7 +647,7 @@ public:

/// Returns the ordering constraint on this RMW.
AtomicOrdering getOrdering() const {
return AtomicOrdering((getSubclassDataFromInstruction() & 28) >> 2);
return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
}

/// Returns whether this RMW is atomic between threads or only within a
@@ -2949,16 +2949,23 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_tail: return ParseCall(Inst, PFS, true);
// Memory.
case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
case lltok::kw_load: return ParseLoad(Inst, PFS, false);
case lltok::kw_store: return ParseStore(Inst, PFS, false);
case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
case lltok::kw_fence: return ParseFence(Inst, PFS);
case lltok::kw_atomic: {
bool isVolatile = EatIfPresent(lltok::kw_volatile);
if (EatIfPresent(lltok::kw_load))
return ParseLoad(Inst, PFS, true, isVolatile);
else if (EatIfPresent(lltok::kw_store))
return ParseStore(Inst, PFS, true, isVolatile);
}
case lltok::kw_volatile:
if (EatIfPresent(lltok::kw_load))
return ParseLoad(Inst, PFS, true);
return ParseLoad(Inst, PFS, false, true);
else if (EatIfPresent(lltok::kw_store))
return ParseStore(Inst, PFS, true);
return ParseStore(Inst, PFS, false, true);
else if (EatIfPresent(lltok::kw_cmpxchg))
return ParseCmpXchg(Inst, PFS, true);
else if (EatIfPresent(lltok::kw_atomicrmw))
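A hedged sketch (not part of the patch) of exercising the new keywords end-to-end through the assembly parser; it assumes the LLVM 3.0-era ParseAssemblyString entry point from llvm/Assembly/Parser.h is available.

  const char *Src =
    "define void @f(i32* %p) {\n"
    "entry:\n"
    "  %v = atomic load i32* %p acquire, align 4\n"
    "  atomic store i32 %v, i32* %p release, align 4\n"
    "  ret void\n"
    "}\n";
  SMDiagnostic Err;
  LLVMContext Ctx;
  Module *M = ParseAssemblyString(Src, 0, Err, Ctx); // 0: create a new module
  if (!M)
    Err.print("atomic-demo", errs());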
@@ -3635,34 +3642,48 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
}

/// ParseLoad
/// ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)?
/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
// ::= 'atomic' 'volatile'? 'load' TypeAndValue
// 'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
bool isVolatile) {
bool isAtomic, bool isVolatile) {
Value *Val; LocTy Loc;
unsigned Alignment = 0;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;

if (!Val->getType()->isPointerTy() ||
!cast<PointerType>(Val->getType())->getElementType()->isFirstClassType())
return Error(Loc, "load operand must be a pointer to a first class type");
if (isAtomic && !Alignment)
return Error(Loc, "atomic load must have explicit non-zero alignment");
if (Ordering == Release || Ordering == AcquireRelease)
return Error(Loc, "atomic load cannot use Release ordering");

Inst = new LoadInst(Val, "", isVolatile, Alignment);
Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
return AteExtraComma ? InstExtraComma : InstNormal;
}

/// ParseStore
/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
/// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
/// 'singlethread'? AtomicOrdering (',' 'align' i32)?
int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
bool isVolatile) {
bool isAtomic, bool isVolatile) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
unsigned Alignment = 0;
bool AteExtraComma = false;
AtomicOrdering Ordering = NotAtomic;
SynchronizationScope Scope = CrossThread;
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;

@@ -3672,8 +3693,12 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
return Error(Loc, "store operand must be a first class value");
if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
return Error(Loc, "stored value and pointer type do not match");
if (isAtomic && !Alignment)
return Error(Loc, "atomic store must have explicit non-zero alignment");
if (Ordering == Acquire || Ordering == AcquireRelease)
return Error(Loc, "atomic store cannot use Acquire ordering");

Inst = new StoreInst(Val, Ptr, isVolatile, Alignment);
Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
return AteExtraComma ? InstExtraComma : InstNormal;
}

@@ -362,8 +362,10 @@ namespace llvm {
int ParsePHI(Instruction *&I, PerFunctionState &PFS);
bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseLoad(Instruction *&I, PerFunctionState &PFS,
bool isAtomic, bool isVolatile);
int ParseStore(Instruction *&I, PerFunctionState &PFS,
bool isAtomic, bool isVolatile);
int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
int ParseFence(Instruction *&I, PerFunctionState &PFS);
@@ -2567,6 +2567,28 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
case bitc::FUNC_CODE_INST_LOADATOMIC: {
// LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
unsigned OpNum = 0;
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
OpNum+4 != Record.size())
return Error("Invalid LOADATOMIC record");

AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
if (Ordering == NotAtomic || Ordering == Release ||
Ordering == AcquireRelease)
return Error("Invalid LOADATOMIC record");
if (Ordering != NotAtomic && Record[OpNum] == 0)
return Error("Invalid LOADATOMIC record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);

I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
InstructionList.push_back(I);
break;
}
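A note on the alignment field (a reading aid, not part of the patch): the bitcode writer stores Log2_32(Align)+1 and the reader above inverts it with (1 << n) >> 1, so an encoded 0 round-trips to "no alignment".

  unsigned Encoded = Log2_32(Align) + 1;   // writer side (BitcodeWriter below)
  unsigned Decoded = (1u << Encoded) >> 1; // reader side (code above); == Align
  assert(Decoded == Align && "alignment must be a power of two to round-trip");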
case bitc::FUNC_CODE_INST_STORE: { // STORE2:[ptrty, ptr, val, align, vol]
unsigned OpNum = 0;
Value *Val, *Ptr;

@@ -2580,6 +2602,29 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
case bitc::FUNC_CODE_INST_STOREATOMIC: {
// STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
getValue(Record, OpNum,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid STOREATOMIC record");

AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
if (Ordering == NotAtomic || Ordering == Release ||
Ordering == AcquireRelease)
return Error("Invalid STOREATOMIC record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
if (Ordering != NotAtomic && Record[OpNum] == 0)
return Error("Invalid STOREATOMIC record");

I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1,
Ordering, SynchScope);
InstructionList.push_back(I);
break;
}
case bitc::FUNC_CODE_INST_CMPXCHG: {
// CMPXCHG:[ptrty, ptr, cmp, new, vol, ordering, synchscope]
unsigned OpNum = 0;

@@ -2592,7 +2637,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
OpNum+3 != Record.size())
return Error("Invalid CMPXCHG record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
if (Ordering == NotAtomic)
if (Ordering == NotAtomic || Ordering == Unordered)
return Error("Invalid CMPXCHG record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);

@@ -2614,7 +2659,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
Operation > AtomicRMWInst::LAST_BINOP)
return Error("Invalid ATOMICRMW record");
AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
if (Ordering == NotAtomic)
if (Ordering == NotAtomic || Ordering == Unordered)
return Error("Invalid ATOMICRMW record");
SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);

@@ -1175,19 +1175,34 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
break;

case Instruction::Load:
Code = bitc::FUNC_CODE_INST_LOAD;
if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;

if (cast<LoadInst>(I).isAtomic()) {
Code = bitc::FUNC_CODE_INST_LOADATOMIC;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
} else {
Code = bitc::FUNC_CODE_INST_LOAD;
if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
}
Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
Vals.push_back(cast<LoadInst>(I).isVolatile());
if (cast<LoadInst>(I).isAtomic()) {
Vals.push_back(GetEncodedOrdering(cast<LoadInst>(I).getOrdering()));
Vals.push_back(GetEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
}
break;
case Instruction::Store:
Code = bitc::FUNC_CODE_INST_STORE;
if (cast<StoreInst>(I).isAtomic())
Code = bitc::FUNC_CODE_INST_STOREATOMIC;
else
Code = bitc::FUNC_CODE_INST_STORE;
PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
Vals.push_back(cast<StoreInst>(I).isVolatile());
if (cast<StoreInst>(I).isAtomic()) {
Vals.push_back(GetEncodedOrdering(cast<StoreInst>(I).getOrdering()));
Vals.push_back(GetEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
}
break;
case Instruction::AtomicCmpXchg:
Code = bitc::FUNC_CODE_INST_CMPXCHG;
@@ -190,6 +190,16 @@ static bool LowerFenceInst(FenceInst *FI) {
return true;
}

static bool LowerLoadInst(LoadInst *LI) {
LI->setAtomic(NotAtomic);
return true;
}

static bool LowerStoreInst(StoreInst *SI) {
SI->setAtomic(NotAtomic);
return true;
}

namespace {
struct LowerAtomic : public BasicBlockPass {
static char ID;

@@ -208,6 +218,13 @@ namespace {
Changed |= LowerAtomicCmpXchgInst(CXI);
else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst))
Changed |= LowerAtomicRMWInst(RMWI);
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isAtomic())
LowerLoadInst(LI);
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isAtomic())
LowerStoreInst(SI);
}
}
return Changed;
}
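The two new helpers simply drop the atomic marker, which is only sound under this pass's single-threaded assumption. A hedged before/after sketch (LI is assumed to be an atomic LoadInst*):

  // Before lowering:  %v = atomic load i32* %p seq_cst, align 4
  // After lowering:   %v = load i32* %p, align 4
  if (LI->isAtomic())
    LI->setAtomic(NotAtomic);   // same transformation LowerLoadInst performs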
@@ -1659,14 +1659,18 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << '%' << SlotNum << " = ";
}

// If this is an atomic load or store, print out the atomic marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
(isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
Out << "atomic ";

// If this is a volatile load or store, print out the volatile marker.
if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
(isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
Out << "volatile ";
} else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
// If this is a call, check if it's a tail call.
(isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
Out << "volatile ";

if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
Out << "tail ";
}

// Print out the opcode...
Out << I.getOpcodeName();

@@ -1913,11 +1917,17 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
}

// Print post operand alignment for load/store.
if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
Out << ", align " << cast<LoadInst>(I).getAlignment();
} else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
Out << ", align " << cast<StoreInst>(I).getAlignment();
// Print atomic ordering/alignment for memory operations
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
writeAtomic(LI->getOrdering(), LI->getSynchScope());
if (LI->getAlignment())
Out << ", align " << LI->getAlignment();
} else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
writeAtomic(SI->getOrdering(), SI->getSynchScope());
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
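With the printer changes above, an atomic access built through the new constructors should come back out in the textual syntax documented earlier in this patch. A hedged example (SI is assumed to be an atomic StoreInst*):

  SI->print(errs());
  // expected output along the lines of:
  //   atomic store i32 %v, i32* %p release, align 4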
@@ -822,6 +822,8 @@ bool AllocaInst::isStaticAlloca() const {
void LoadInst::AssertOK() {
assert(getOperand(0)->getType()->isPointerTy() &&
"Ptr must have pointer type.");
assert(!(isAtomic() && getAlignment() == 0) &&
"Alignment required for atomic load");
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)

@@ -829,6 +831,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

@@ -838,6 +841,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

@@ -848,6 +852,18 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

@@ -858,6 +874,7 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

@@ -868,27 +885,43 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(NotAtomic);
AssertOK();
setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
Instruction *InsertBef)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(Order, SynchScope);
AssertOK();
setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
setAlignment(Align);
setAtomic(Order, SynchScope);
AssertOK();
setName(Name);
}

LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertBef) {
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}

@@ -898,6 +931,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
Load, Ptr, InsertAE) {
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}

@@ -908,6 +942,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertBef) {
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}

@@ -918,6 +953,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
if (Name && Name[0]) setName(Name);
}

@@ -926,7 +962,7 @@ void LoadInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1)<<1));
assert(getAlignment() == Align && "Alignment representation error!");
}
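Why the setAlignment mask changed (a reading aid, not part of the patch): alignment now has to be cleared without disturbing the ordering and synch-scope bits that live above it, so the old "& 1" (keep only the volatile bit) is replaced by masking out just the five alignment bits.

  unsigned Old = (Data & 1) | ((Log2_32(Align) + 1) << 1);           // would wipe bits 6 and up
  unsigned New = (Data & ~(31u << 1)) | ((Log2_32(Align) + 1) << 1); // preserves ordering/scope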
@@ -942,6 +978,8 @@ void StoreInst::AssertOK() {
assert(getOperand(0)->getType() ==
cast<PointerType>(getOperand(1)->getType())->getElementType()
&& "Ptr must be a pointer to Val type!");
assert(!(isAtomic() && getAlignment() == 0) &&
"Alignment required for atomic load");
}

@@ -954,6 +992,7 @@ StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
}

@@ -966,6 +1005,7 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
Op<1>() = addr;
setVolatile(false);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
}

@@ -979,6 +1019,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
}

@@ -992,19 +1033,23 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(NotAtomic);
AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd)
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
InsertBefore) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(Order, SynchScope);
AssertOK();
}

@@ -1018,6 +1063,37 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(0);
setAtomic(NotAtomic);
AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(NotAtomic);
AssertOK();
}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SynchronizationScope SynchScope,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
OperandTraits<StoreInst>::operands(this),
InsertAtEnd) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(Order, SynchScope);
AssertOK();
}

@@ -1025,7 +1101,7 @@ void StoreInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
((Log2_32(Align)+1) << 1));
assert(getAlignment() == Align && "Alignment representation error!");
}

@@ -3158,14 +3234,14 @@ AllocaInst *AllocaInst::clone_impl() const {
}

LoadInst *LoadInst::clone_impl() const {
return new LoadInst(getOperand(0),
Twine(), isVolatile(),
getAlignment());
return new LoadInst(getOperand(0), Twine(), isVolatile(),
getAlignment(), getOrdering(), getSynchScope());
}

StoreInst *StoreInst::clone_impl() const {
return new StoreInst(getOperand(0), getOperand(1),
isVolatile(), getAlignment());
return new StoreInst(getOperand(0), getOperand(1),isVolatile(),
getAlignment(), getOrdering(), getSynchScope());

}

AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
@@ -1297,6 +1297,15 @@ void Verifier::visitLoadInst(LoadInst &LI) {
Type *ElTy = PTy->getElementType();
Assert2(ElTy == LI.getType(),
"Load result type does not match pointer operand type!", &LI, ElTy);
if (LI.isAtomic()) {
Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
"Load cannot have Release ordering", &LI);
Assert1(LI.getAlignment() != 0,
"Atomic load must specify explicit alignment", &LI);
} else {
Assert1(LI.getSynchScope() == CrossThread,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
visitInstruction(LI);
}

@@ -1307,6 +1316,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
&SI, ElTy);
if (SI.isAtomic()) {
Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
"Store cannot have Acquire ordering", &SI);
Assert1(SI.getAlignment() != 0,
"Atomic store must specify explicit alignment", &SI);
} else {
Assert1(SI.getSynchScope() == CrossThread,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
visitInstruction(SI);
}
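A hedged sketch of what the new verifier checks catch, assuming the verifyModule entry point from llvm/Analysis/Verifier.h: a module containing an atomic load with release ordering, or an atomic access without an explicit alignment, now fails verification.

  std::string ErrMsg;
  if (verifyModule(*M, ReturnStatusAction, &ErrMsg))
    errs() << "verifier: " << ErrMsg << "\n"; // e.g. "Load cannot have Release ordering"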