
Remove extra MayLoad/MayStore flags from atomic_load/store.

These extra flags are not required to properly order the atomic
load/store instructions. SelectionDAGBuilder chains atomics as if they
were volatile, and SelectionDAG::getAtomic() sets the isVolatile bit on
the memory operands of all atomic operations.

The volatile bit is enough to order atomic loads and stores during and
after SelectionDAG.

This means we set mayLoad on atomic_load, mayStore on atomic_store, and
mayLoad+mayStore on the remaining atomic read-modify-write operations.

llvm-svn: 162733
Jakob Stoklund Olesen  2012-08-28 03:11:32 +00:00
commit 93b4cf4daf (parent eefb981463)
5 changed files with 25 additions and 31 deletions
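
For reference, all three SelectionDAG::getAtomic() overloads changed below converge on the same flag computation. Distilled into one standalone helper (a sketch of the patched logic with a hypothetical function name, not a verbatim excerpt from the commit):

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// Every atomic stays volatile (the bit that carries the ordering);
// MOLoad/MOStore are now set per opcode instead of unconditionally.
static unsigned computeAtomicMMOFlags(unsigned Opcode) {
  unsigned Flags = MachineMemOperand::MOVolatile;
  if (Opcode != ISD::ATOMIC_STORE)
    Flags |= MachineMemOperand::MOLoad;   // loads and RMW operations read
  if (Opcode != ISD::ATOMIC_LOAD)
    Flags |= MachineMemOperand::MOStore;  // stores and RMW operations write
  return Flags;
}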


@@ -1011,11 +1011,6 @@ class AtomicSDNode : public MemSDNode {
     SubclassData |= SynchScope << 12;
     assert(getOrdering() == Ordering && "Ordering encoding error!");
     assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
-    assert((readMem() || getOrdering() <= Monotonic) &&
-           "Acquire/Release MachineMemOperand must be a load!");
-    assert((writeMem() || getOrdering() <= Monotonic) &&
-           "Acquire/Release MachineMemOperand must be a store!");
   }
 public:
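
The asserts come out because they hard-coded the old invariant: anything ordered stronger than Monotonic had to be both a load and a store. A minimal standalone sketch of that invariant (hypothetical names, not LLVM's actual API), showing why an acquire atomic load now violates it:

#include <cassert>

// Mirrors LLVM's AtomicOrdering enum values.
enum Ordering { NotAtomic, Unordered, Monotonic, Acquire, Release,
                AcquireRelease, SequentiallyConsistent };

// The removed asserts required both readMem() and writeMem() for any
// node with Ordering > Monotonic.
static bool oldInvariant(bool ReadMem, bool WriteMem, Ordering O) {
  return (ReadMem || O <= Monotonic) && (WriteMem || O <= Monotonic);
}

int main() {
  // An acquire atomic_load is now load-only, so the old invariant fails;
  // that is exactly why the asserts had to go.
  assert(!oldInvariant(/*ReadMem=*/true, /*WriteMem=*/false, Acquire));
  return 0;
}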


@@ -445,9 +445,9 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
 def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
                     [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_load     : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+                    [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
 def atomic_store    : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
-                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+                    [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
 
 // Do not use ld, st directly. Use load, extload, sextload, zextload, store,
 // and truncst (see below).
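
These SDNP flags are what TableGen uses to infer mayLoad/mayStore on the instructions selected from these patterns, which is how later passes can tell the three kinds of atomics apart. An illustrative query (hypothetical helper; MachineInstr::mayLoad() and mayStore() are the standard accessors):

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// After this change: atomic loads are mayLoad-only, atomic stores are
// mayStore-only, and only read-modify-write atomics carry both flags.
static bool looksLikeAtomicRMW(const MachineInstr &MI) {
  return MI.mayLoad() && MI.mayStore();
}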


@@ -3927,12 +3927,16 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
     Alignment = getEVTAlignment(MemVT);
 
   MachineFunction &MF = getMachineFunction();
-  unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+  // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
   // For now, atomics are considered to be volatile always.
   // FIXME: Volatile isn't really correct; we should keep track of atomic
   // orderings in the memoperand.
-  Flags |= MachineMemOperand::MOVolatile;
+  unsigned Flags = MachineMemOperand::MOVolatile;
+  if (Opcode != ISD::ATOMIC_STORE)
+    Flags |= MachineMemOperand::MOLoad;
+  if (Opcode != ISD::ATOMIC_LOAD)
+    Flags |= MachineMemOperand::MOStore;
 
   MachineMemOperand *MMO =
     MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
@@ -3982,17 +3986,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
     Alignment = getEVTAlignment(MemVT);
 
   MachineFunction &MF = getMachineFunction();
-  // A monotonic store does not load; a release store "loads" in the sense
-  // that other stores cannot be sunk past it.
-  unsigned Flags = MachineMemOperand::MOStore;
-  if (Opcode != ISD::ATOMIC_STORE || Ordering > Monotonic)
-    Flags |= MachineMemOperand::MOLoad;
-  // For now, atomics are considered to be volatile always.
+  // An atomic store does not load. An atomic load does not store.
+  // (An atomicrmw obviously both loads and stores.)
+  // For now, atomics are considered to be volatile always, and they are
+  // chained as such.
   // FIXME: Volatile isn't really correct; we should keep track of atomic
   // orderings in the memoperand.
-  Flags |= MachineMemOperand::MOVolatile;
+  unsigned Flags = MachineMemOperand::MOVolatile;
+  if (Opcode != ISD::ATOMIC_STORE)
+    Flags |= MachineMemOperand::MOLoad;
+  if (Opcode != ISD::ATOMIC_LOAD)
+    Flags |= MachineMemOperand::MOStore;
 
   MachineMemOperand *MMO =
     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
@@ -4055,16 +4059,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
     Alignment = getEVTAlignment(MemVT);
 
   MachineFunction &MF = getMachineFunction();
-  // A monotonic load does not store; an acquire load "stores" in the sense
-  // that other loads cannot be hoisted past it.
-  unsigned Flags = MachineMemOperand::MOLoad;
-  if (Ordering > Monotonic)
-    Flags |= MachineMemOperand::MOStore;
-  // For now, atomics are considered to be volatile always.
+  // An atomic store does not load. An atomic load does not store.
+  // (An atomicrmw obviously both loads and stores.)
+  // For now, atomics are considered to be volatile always, and they are
+  // chained as such.
   // FIXME: Volatile isn't really correct; we should keep track of atomic
   // orderings in the memoperand.
-  Flags |= MachineMemOperand::MOVolatile;
+  unsigned Flags = MachineMemOperand::MOVolatile;
+  if (Opcode != ISD::ATOMIC_STORE)
+    Flags |= MachineMemOperand::MOLoad;
+  if (Opcode != ISD::ATOMIC_LOAD)
+    Flags |= MachineMemOperand::MOStore;
 
   MachineMemOperand *MMO =
     MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
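
The MOVolatile bit is doing the ordering work here. A short sketch of the property downstream code relies on (hypothetical helper, not part of the patch):

#include "llvm/CodeGen/MachineMemOperand.h"
using namespace llvm;

// Two memory accesses may be reordered only if neither is volatile.
// Since getAtomic() marks every atomic operand MOVolatile, atomics are
// never reordered with each other, regardless of MOLoad/MOStore.
static bool mayReorder(const MachineMemOperand &A,
                       const MachineMemOperand &B) {
  return !A.isVolatile() && !B.isVolatile();
}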


@@ -1,7 +1,4 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; XFAIL: *
-; This is xfailed until we have atomic load pseudos. PR13693.
 ; Check that we generate new value jump.
 @i = global i32 0, align 4


@@ -1,7 +1,4 @@
 ; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; XFAIL: *
-; This is xfailed until we have atomic load pseudos. PR13693.
 ; Check that we generate new value store packet in V4
 @i = global i32 0, align 4
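
With the spurious mayStore gone from atomic_load, the Hexagon new-value optimizations can fire on these tests again, so the XFAIL markers come off. The gate is presumably of this shape (a hypothetical sketch, not Hexagon's actual code): the feeder instruction must be a pure load.

#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;

// A new-value jump/store can only fold a feeder that reads memory and
// has no other side effects; a load wrongly flagged mayStore is rejected.
static bool canBeNewValueFeeder(const MachineInstr &MI) {
  return MI.mayLoad() && !MI.mayStore() && !MI.hasUnmodeledSideEffects();
}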