From 5379c4a713de6bade4f974013282634938f14978 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Wed, 27 Mar 2019 17:29:34 +0000 Subject: [PATCH] [X86] Add post-isel pseudos for rotate by immediate using SHLD/SHRD Haswell CPUs have special support for SHLD/SHRD with the same register for both sources. Such an instruction will go to the rotate/shift unit on port 0 or 6. This gives it 1 cycle latency and 0.5 cycle reciprocal throughput. When the register is not the same, it becomes a 3 cycle operation on port 1. Sandybridge and Ivybridge always have 1 cycle latency and 0.5 cycle reciprocal throughput for any SHLD. When FastSHLDRotate feature flag is set, we try to use SHLD for rotate by immediate unless BMI2 is enabled. But MachineCopyPropagation can look through a copy and change one of the sources to be different. This will break the hardware optimization. This patch adds a pseudo instruction to hide the second source input until after register allocation and MachineCopyPropagation. I'm not sure if this is the best way to do this or if there's some other way we can make this work. Fixes PR41055 Differential Revision: https://reviews.llvm.org/D59391 llvm-svn: 357096 --- lib/Target/X86/X86InstrInfo.cpp | 18 ++++++++++++++++++ lib/Target/X86/X86InstrShiftRotate.td | 26 +++++++++++++++++--------- test/CodeGen/X86/rot32.ll | 16 ++++++++-------- test/CodeGen/X86/rot64.ll | 16 ++++++++-------- 4 files changed, 51 insertions(+), 25 deletions(-) diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index c03b602dfe0..e157bc13ec2 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -4083,6 +4083,20 @@ static bool expandNOVLXStore(MachineInstrBuilder &MIB, return true; } + +static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { + MIB->setDesc(Desc); + int64_t ShiftAmt = MIB->getOperand(2).getImm(); + // Temporarily remove the immediate so we can add another source register. 
+ MIB->RemoveOperand(2); + // Add the register. Don't copy the kill flag if there is one. + MIB.addReg(MIB->getOperand(1).getReg(), + getUndefRegState(MIB->getOperand(1).isUndef())); + // Add back the immediate. + MIB.addImm(ShiftAmt); + return true; +} + bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { bool HasAVX = Subtarget.hasAVX(); MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); @@ -4237,6 +4251,10 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { case X86::XOR64_FP: case X86::XOR32_FP: return expandXorFP(MIB, *this); + case X86::SHLDROT32ri: return expandSHXDROT(MIB, get(X86::SHLD32rri8)); + case X86::SHLDROT64ri: return expandSHXDROT(MIB, get(X86::SHLD64rri8)); + case X86::SHRDROT32ri: return expandSHXDROT(MIB, get(X86::SHRD32rri8)); + case X86::SHRDROT64ri: return expandSHXDROT(MIB, get(X86::SHRD64rri8)); case X86::ADD8rr_DB: MIB->setDesc(get(X86::OR8rr)); break; case X86::ADD16rr_DB: MIB->setDesc(get(X86::OR16rr)); break; case X86::ADD32rr_DB: MIB->setDesc(get(X86::OR32rr)); break; diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index 633b7099af6..9d974b716dd 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -838,16 +838,24 @@ def : Pat<(store (rotr (loadi64 addr:$dst), (i8 63)), addr:$dst), // Sandy Bridge and newer Intel processors support faster rotates using // SHLD to avoid a partial flag update on the normal rotate instructions. -let Predicates = [HasFastSHLDRotate], AddedComplexity = 5 in { - def : Pat<(rotl GR32:$src, (i8 imm:$shamt)), - (SHLD32rri8 GR32:$src, GR32:$src, imm:$shamt)>; - def : Pat<(rotl GR64:$src, (i8 imm:$shamt)), - (SHLD64rri8 GR64:$src, GR64:$src, imm:$shamt)>; +// Use a pseudo so that TwoAddressInstructionPass and register allocation will see +// this as a unary instruction. 
+let Predicates = [HasFastSHLDRotate], AddedComplexity = 5, + Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteSHDrri], + Constraints = "$src1 = $dst" in { + def SHLDROT32ri : I<0, Pseudo, (outs GR32:$dst), + (ins GR32:$src1, u8imm:$shamt), "", + [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$shamt)))]>; + def SHLDROT64ri : I<0, Pseudo, (outs GR64:$dst), + (ins GR64:$src1, u8imm:$shamt), "", + [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$shamt)))]>; - def : Pat<(rotr GR32:$src, (i8 imm:$shamt)), - (SHRD32rri8 GR32:$src, GR32:$src, imm:$shamt)>; - def : Pat<(rotr GR64:$src, (i8 imm:$shamt)), - (SHRD64rri8 GR64:$src, GR64:$src, imm:$shamt)>; + def SHRDROT32ri : I<0, Pseudo, (outs GR32:$dst), + (ins GR32:$src1, u8imm:$shamt), "", + [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$shamt)))]>; + def SHRDROT64ri : I<0, Pseudo, (outs GR64:$dst), + (ins GR64:$src1, u8imm:$shamt), "", + [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$shamt)))]>; } def ROT32L2R_imm8 : SDNodeXForm