mirror of https://github.com/RPCS3/llvm-mirror.git
[AMDGPU] Split 64-Bit XNOR to 64-Bit NOT/XOR
The identity ~(x ^ y) == (~x ^ y) == (x ^ ~y) allows XNOR (XOR/NOT) to turn into NOT/XOR. Handling this case with its own split means we can make the NOT remain in the scalar unit. Previously, we split 64-bit XNOR into two 32-bit XNORs, then lowered. Now, when either source is a scalar 64-bit value, we get three instructions (s_not, v_xor, v_xor) rather than four.

Add test cases to xnor.ll to attempt XNOR Vx, Sy and XNOR Sx, Vy. Also add a test that uses the opposite identity, such that (~x ^ y) on the scalar unit (or on the vector unit for gfx906) can generate XNOR. This already worked, but I didn't see a test for it.

Differential Revision: https://reviews.llvm.org/D55071

llvm-svn: 348075
parent b4155e7296
commit 6a0a522a58
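To make the instruction-count claim concrete, here is an illustrative sketch (not part of the commit itself) of the lowering for a 64-bit XNOR where %a is uniform (SGPRs) and %b is divergent (VGPRs); the register assignments and instruction order are hypothetical:

    ; IR: xnor written as xor followed by xor-with-all-ones
    %xor = xor i64 %a, %b
    %res = xor i64 %xor, -1

    ; Old path: S_XNOR_B64 split into two S_XNOR_B32, each moved to the
    ; vector unit as a NOT + XOR pair -- four VALU instructions:
    ;   v_not_b32  v2, v2
    ;   v_xor_b32  v0, s0, v2
    ;   v_not_b32  v3, v3
    ;   v_xor_b32  v1, s1, v3

    ; New path: apply ~(x ^ y) == (~x ^ y), keep the NOT on the scalar
    ; unit, and split only the XOR -- one SALU + two VALU instructions:
    ;   s_not_b64  s[0:1], s[0:1]
    ;   v_xor_b32  v0, s0, v0
    ;   v_xor_b32  v1, s1, v1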
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -876,7 +876,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
 
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
@@ -977,7 +977,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
   unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
   unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned SpillSize = TRI->getSpillSize(*RC);
@@ -1032,7 +1032,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
   MachineFunction *MF = MBB.getParent();
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
-  DebugLoc DL = MBB.findDebugLoc(MI);
+  const DebugLoc &DL = MBB.findDebugLoc(MI);
   unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
   unsigned WavefrontSize = ST.getWavefrontSize();
 
@@ -1040,7 +1040,7 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
   if (!MFI->hasCalculatedTID()) {
     MachineBasicBlock &Entry = MBB.getParent()->front();
     MachineBasicBlock::iterator Insert = Entry.front();
-    DebugLoc DL = Insert->getDebugLoc();
+    const DebugLoc &DL = Insert->getDebugLoc();
 
     TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                    *MF);
@@ -4162,7 +4162,10 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
       continue;
 
     case AMDGPU::S_XNOR_B64:
-      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      if (ST.hasDLInsts())
+        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
+      else
+        splitScalar64BitXnor(Worklist, Inst, MDT);
       Inst.eraseFromParent();
       continue;
 
@@ -4753,13 +4756,55 @@ void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
 }
 
+void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
+                                       MachineInstr &Inst,
+                                       MachineDominatorTree *MDT) const {
+  MachineBasicBlock &MBB = *Inst.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+  MachineOperand &Dest = Inst.getOperand(0);
+  MachineOperand &Src0 = Inst.getOperand(1);
+  MachineOperand &Src1 = Inst.getOperand(2);
+  const DebugLoc &DL = Inst.getDebugLoc();
+
+  MachineBasicBlock::iterator MII = Inst;
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+
+  unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
+
+  MachineOperand* Op0;
+  MachineOperand* Op1;
+
+  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
+    Op0 = &Src0;
+    Op1 = &Src1;
+  } else {
+    Op0 = &Src1;
+    Op1 = &Src0;
+  }
+
+  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
+    .add(*Op0);
+
+  unsigned NewDest = MRI.createVirtualRegister(DestRC);
+
+  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
+    .addReg(Interm)
+    .add(*Op1);
+
+  MRI.replaceRegWith(Dest.getReg(), NewDest);
+
+  Worklist.insert(&Xor);
+}
+
 void SIInstrInfo::splitScalar64BitBCNT(
     SetVectorType &Worklist, MachineInstr &Inst) const {
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
 
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   MachineOperand &Src = Inst.getOperand(1);
@@ -4795,7 +4840,7 @@ void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
   MachineBasicBlock &MBB = *Inst.getParent();
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   MachineBasicBlock::iterator MII = Inst;
-  DebugLoc DL = Inst.getDebugLoc();
+  const DebugLoc &DL = Inst.getDebugLoc();
 
   MachineOperand &Dest = Inst.getOperand(0);
   uint32_t Imm = Inst.getOperand(2).getImm();
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -107,6 +107,9 @@ private:
                                 unsigned Opcode,
                                 MachineDominatorTree *MDT = nullptr) const;
 
+  void splitScalar64BitXnor(SetVectorType &Worklist, MachineInstr &Inst,
+                            MachineDominatorTree *MDT = nullptr) const;
+
   void splitScalar64BitBCNT(SetVectorType &Worklist,
                             MachineInstr &Inst) const;
   void splitScalar64BitBFE(SetVectorType &Worklist,
--- a/test/CodeGen/AMDGPU/xnor.ll
+++ b/test/CodeGen/AMDGPU/xnor.ll
@@ -74,9 +74,9 @@ entry:
 ; GCN-LABEL: {{^}}vector_xnor_i64_one_use
 ; GCN-NOT: s_xnor_b64
 ; GCN: v_not_b32
-; GCN: v_xor_b32
 ; GCN: v_not_b32
 ; GCN: v_xor_b32
+; GCN: v_xor_b32
 ; GCN-DL: v_xnor_b32
 ; GCN-DL: v_xnor_b32
 define i64 @vector_xnor_i64_one_use(i64 %a, i64 %b) {
@@ -110,5 +110,89 @@ define amdgpu_kernel void @xnor_v_s_i32_one_use(i32 addrspace(1)* %out, i32 %s)
   ret void
 }
+
+; GCN-LABEL: {{^}}xnor_i64_s_v_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_s_v_one_use(
+    i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %a, %b
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}xnor_i64_v_s_one_use
+; GCN-NOT: s_xnor_b64
+; GCN: s_not_b64
+; GCN: v_xor_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+; GCN-DL: v_xnor_b32
+define amdgpu_kernel void @xnor_i64_v_s_one_use(
+    i64 addrspace(1)* %r0, i64 %a) {
+entry:
+  %b32 = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %b64 = zext i32 %b32 to i64
+  %b = shl i64 %b64, 29
+  %xor = xor i64 %b, %a
+  %r0.val = xor i64 %xor, -1
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}vector_xor_na_b_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_na_b_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %na = xor i32 %a, -1
+  %r = xor i32 %na, %b
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}vector_xor_a_nb_i32_one_use
+; GCN-NOT: s_xnor_b32
+; GCN: v_not_b32
+; GCN: v_xor_b32
+; GCN-DL: v_xnor_b32
+define i32 @vector_xor_a_nb_i32_one_use(i32 %a, i32 %b) {
+entry:
+  %nb = xor i32 %b, -1
+  %r = xor i32 %a, %nb
+  ret i32 %r
+}
+
+; GCN-LABEL: {{^}}scalar_xor_a_nb_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_a_nb_i64_one_use(
+    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %nb = xor i64 %b, -1
+  %r0.val = xor i64 %a, %nb
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
+
+; GCN-LABEL: {{^}}scalar_xor_na_b_i64_one_use
+; GCN: s_xnor_b64
+define amdgpu_kernel void @scalar_xor_na_b_i64_one_use(
+    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
+entry:
+  %na = xor i64 %a, -1
+  %r0.val = xor i64 %na, %b
+  store i64 %r0.val, i64 addrspace(1)* %r0
+  ret void
+}
 
 ; Function Attrs: nounwind readnone
 declare i32 @llvm.amdgcn.workitem.id.x() #0
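For reference, the GCN and GCN-DL check prefixes above are driven by xnor.ll's RUN lines, which this diff does not touch and therefore does not show. A plausible invocation pair (the exact -mcpu values are assumptions, chosen so that only the second target has DL instructions) would look like:

    ; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
    ; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GCN-DL %s

Note that the scalar_xor_*_i64 tests still expect s_xnor_b64: both sources are uniform, so the whole XNOR stays on the scalar unit, while gfx906 (GCN-DL) keeps v_xnor_b32 for the divergent cases because ST.hasDLInsts() is true there.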