1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-24 03:33:20 +01:00

Implement 32-bit and 64-bit versions of the PPC atomic binary primitives.

llvm-svn: 55343
This commit is contained in:
Dale Johannesen 2008-08-25 22:34:37 +00:00
parent 45e24233c7
commit f201a3aaf3
5 changed files with 230 additions and 48 deletions

View File

@ -3860,6 +3860,60 @@ SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
// Other Lowering Code
//===----------------------------------------------------------------------===//
MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                    bool is64bit, unsigned BinOpcode) {
  // Expand an ATOMIC_LOAD_<op> pseudo into a load-reserve /
  // store-conditional retry loop:
  //   loopMBB:
  //     l[wd]arx  dest, ptr       ; load and reserve
  //     <BinOp>   tmp, incr, dest ; compute the new value
  //     st[wd]cx. tmp, ptr        ; store iff reservation still held
  //     bne-      loopMBB         ; retry on lost reservation
  //   exitMBB:
  // Returns the block (exitMBB) in which subsequent code should be emitted.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;  // insert the new blocks immediately after BB

  // Pseudo operands: (dest, ptrA, ptrB, incr) — ptrA/ptrB form the
  // register+register (memrr/xoaddr) address.
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptrA = MI->getOperand(1).getReg();
  unsigned ptrB = MI->getOperand(2).getReg();
  unsigned incr = MI->getOperand(3).getReg();

  MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, loopMBB);
  F->insert(It, exitMBB);
  // exitMBB takes over BB's successors; BB now falls through to loopMBB.
  exitMBB->transferSuccessors(BB);

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  // The scratch register must come from the 64-bit class (G8RC) for the
  // 64-bit primitives and the 32-bit class (GPRC) otherwise.  The original
  // code had the two classes swapped, handing a GPRC vreg to 64-bit ops.
  unsigned TmpReg = RegInfo.createVirtualRegister(
    is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
              (const TargetRegisterClass *) &PPC::GPRCRegClass);

  // thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   l[wd]arx dest, ptr
  //   <binop> TmpReg, incr, dest
  //   st[wd]cx. TmpReg, ptr
  //   bne- loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
    .addReg(ptrA).addReg(ptrB);
  // Operand order (incr, dest) matters for non-commutative BinOpcodes:
  // e.g. SUBF rT,rA,rB computes rB - rA, yielding dest - incr here.
  BuildMI(BB, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
  BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
    .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
  // st[wd]cx. sets CR0; loop while the store-conditional failed.
  BuildMI(BB, TII->get(PPC::BCC))
    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  //   ...
  BB = exitMBB;
  return BB;
}
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) {
@ -3920,53 +3974,30 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
}
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32 ||
MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) {
bool is64bit = MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64;
unsigned dest = MI->getOperand(0).getReg();
unsigned ptrA = MI->getOperand(1).getReg();
unsigned ptrB = MI->getOperand(2).getReg();
unsigned incr = MI->getOperand(3).getReg();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
F->insert(It, exitMBB);
exitMBB->transferSuccessors(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned TmpReg = RegInfo.createVirtualRegister(
is64bit ? (const TargetRegisterClass *) &PPC::GPRCRegClass :
(const TargetRegisterClass *) &PPC::G8RCRegClass);
// thisMBB:
// ...
// fallthrough --> loopMBB
BB->addSuccessor(loopMBB);
// loopMBB:
// l[wd]arx dest, ptr
// add r0, dest, incr
// st[wd]cx. r0, ptr
// bne- loopMBB
// fallthrough --> exitMBB
BB = loopMBB;
BuildMI(BB, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
.addReg(ptrA).addReg(ptrB);
BuildMI(BB, TII->get(is64bit ? PPC::ADD4 : PPC::ADD8), TmpReg)
.addReg(incr).addReg(dest);
BuildMI(BB, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
.addReg(TmpReg).addReg(ptrA).addReg(ptrB);
BuildMI(BB, TII->get(PPC::BCC))
.addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
BB->addSuccessor(loopMBB);
BB->addSuccessor(exitMBB);
// exitMBB:
// ...
BB = exitMBB;
}
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;

View File

@ -282,6 +282,9 @@ namespace llvm {
virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB);
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB, bool is64Bit,
unsigned BinOpcode);
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>

View File

@ -123,11 +123,33 @@ let usesCustomDAGSchedInserter = 1 in {
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_ADD_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>;
// 64-bit atomic read-modify-write pseudos.  Each takes a register+register
// address (memrr, matched by xoaddr) and an operand register, and is
// expanded by the custom inserter into an ldarx/stdcx. retry loop.
def ATOMIC_LOAD_SUB_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_SUB_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_OR_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_OR_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_XOR_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_XOR_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_AND_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_AND_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>;
def ATOMIC_LOAD_NAND_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr),
"${:comment} ATOMIC_LOAD_NAND_I64 PSEUDO!",
[(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>;
// Compare-and-swap takes the expected ($old) and replacement ($new) values
// instead of a single operand.
def ATOMIC_CMP_SWAP_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new),
"${:comment} ATOMIC_CMP_SWAP_I64 PSEUDO!",
[(set G8RC:$dst,
(atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>;
def ATOMIC_SWAP_I64 : Pseudo<
(outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new),
"${:comment} ATOMIC_SWAP_I64 PSEUDO!",
@ -313,7 +335,6 @@ def SUBFIC8: DForm_2< 8, (outs G8RC:$rD), (ins G8RC:$rA, s16imm64:$imm),
// subf rT, rA, rB computes rB - rA (note the reversed operand order in the
// selection pattern below).
def SUBF8 : XOForm_1<31, 40, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
"subf $rT, $rA, $rB", IntGeneral,
[(set G8RC:$rT, (sub G8RC:$rB, G8RC:$rA))]>;
def SUBFC8 : XOForm_1<31, 8, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB),
"subfc $rT, $rA, $rB", IntGeneral,
[(set G8RC:$rT, (subc G8RC:$rB, G8RC:$rA))]>,

View File

@ -532,11 +532,33 @@ let usesCustomDAGSchedInserter = 1 in {
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_ADD_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>;
// 32-bit atomic read-modify-write pseudos.  Each takes a register+register
// address (memrr, matched by xoaddr) and an operand register, and is
// expanded by the custom inserter into an lwarx/stwcx. retry loop.
def ATOMIC_LOAD_SUB_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_SUB_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_AND_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_AND_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_OR_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_OR_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_XOR_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_XOR_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>;
def ATOMIC_LOAD_NAND_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr),
"${:comment} ATOMIC_LOAD_NAND_I32 PSEUDO!",
[(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>;
// Compare-and-swap takes the expected ($old) and replacement ($new) values
// instead of a single operand.
def ATOMIC_CMP_SWAP_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new),
"${:comment} ATOMIC_CMP_SWAP_I32 PSEUDO!",
[(set GPRC:$dst,
(atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>;
def ATOMIC_SWAP_I32 : Pseudo<
(outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new),
"${:comment} ATOMIC_SWAP_I32 PSEUDO!",

View File

@ -767,6 +767,111 @@ def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
return V->getValueType(0) == MVT::i64;
}]>;
// Width-specific PatFrags for the atomic binary operations.  The generic
// atomic_load_<op> node carries no width, so each fragment's C++ predicate
// checks the node's result value type (i8/i16/i32/i64) to let target
// patterns select on operand width.

// atomic_load_sub by width.
def atomic_load_sub_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_sub node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_sub_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_sub node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_sub_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_sub node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_sub_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_sub node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i64;
}]>;
// atomic_load_and by width.
def atomic_load_and_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_and node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_and_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_and node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_and_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_and node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_and_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_and node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i64;
}]>;
// atomic_load_or by width.
def atomic_load_or_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_or node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_or_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_or node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_or_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_or node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_or_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_or node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i64;
}]>;
// atomic_load_xor by width.
def atomic_load_xor_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_xor node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_xor_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_xor node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_xor_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_xor node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_xor_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_xor node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i64;
}]>;
// atomic_load_nand by width.
def atomic_load_nand_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_nand node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_nand_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_nand node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_nand_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_nand node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_nand_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_nand node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);
return V->getValueType(0) == MVT::i64;
}]>;
def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
AtomicSDNode* V = cast<AtomicSDNode>(N);