
[RISCV] Add isel patterns to optimize slli.uw patterns without Zba extension.

This pattern can occur when an unsigned integer is used to index an array
on RV64.
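
As a concrete illustration, here is a minimal C reproducer of that situation
(the function and variable names are hypothetical, not taken from the commit):
indexing with a 32-bit unsigned value on RV64 zero-extends the index before
scaling it, which creates the and/shl nodes these patterns match.

  // Hypothetical reproducer: 'i' is zero-extended to 64 bits and then
  // scaled by 8, giving (shl (and X, 0xffffffff), 3) in the DAG.
  long get(long *a, unsigned i) {
    return a[i];
  }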

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D95290
Craig Topper 2021-01-25 15:59:25 -08:00
parent 117234d675
commit d64f83a48c
3 changed files with 46 additions and 53 deletions

llvm/lib/Target/RISCV/RISCVInstrInfo.td

@@ -332,6 +332,19 @@ def ImmSub32 : SDNodeXForm<imm, [{
                                    N->getValueType(0));
 }]>;
 
+// Return an immediate subtracted from XLen.
+def ImmSubFromXLen : SDNodeXForm<imm, [{
+  uint64_t XLen = Subtarget->getXLen();
+  return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
+// Return an immediate subtracted from 32.
+def ImmSubFrom32 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Instruction Formats
 //===----------------------------------------------------------------------===//
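
ImmSubFromXLen generalizes the ImmROTL2R transform removed further down: the
rotate patterns rewrite rotl by an immediate as rotr by XLen minus that
immediate. A quick standalone C check of that identity for XLen = 64 (a
sketch; rotl64/rotr64 are local helpers, and n skips 0 because a plain-C
shift by 64 would be undefined):

  #include <assert.h>
  #include <stdint.h>

  // rotl(x, n) == rotr(x, 64 - n) for n in 1..63.
  static uint64_t rotl64(uint64_t x, unsigned n) {
    return (x << n) | (x >> (64 - n));
  }
  static uint64_t rotr64(uint64_t x, unsigned n) {
    return (x >> n) | (x << (64 - n));
  }

  int main(void) {
    uint64_t x = 0x8000000000000001ull;
    for (unsigned n = 1; n < 64; ++n)
      assert(rotl64(x, n) == rotr64(x, 64 - n));
    return 0;
  }
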
@@ -855,6 +868,13 @@ def SRLIWPat : PatFrag<(ops node:$A, node:$B),
   return MatchSRLIW(N);
 }]>;
 
+// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
+// on RV64). Also used to optimize the same sequence without SLLIUW.
+def SLLIUWPat : PatFrag<(ops node:$A, node:$B),
+                        (and (shl node:$A, node:$B), imm), [{
+  return MatchSLLIUW(N);
+}]>;
+
 /// Immediates
 
 def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
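
MatchSLLIUW itself lives in the RISCV DAG isel C++ code and is not part of
this diff. Roughly, it should accept (and (shl X, C2), C1) only when the mask
C1 is exactly 0xffffffff shifted left by C2, so that the shifted value really
is a zero-extended i32. A small C sketch of that condition (a paraphrase
under that assumption, not the actual LLVM source):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  // True if (and (shl X, shamt), mask) keeps exactly the zero-extended
  // low 32 bits of X shifted left by shamt (the uimm5 case above).
  static bool is_slliuw_mask(uint64_t mask, unsigned shamt) {
    return shamt < 32 && mask == (UINT64_C(0xffffffff) << shamt);
  }

  int main(void) {
    assert(is_slliuw_mask(UINT64_C(0x1fffffffe), 1));  // mask from the slliuw test
    assert(!is_slliuw_mask(UINT64_C(0xffffffff0), 3)); // wrong mask for shamt 3
    return 0;
  }
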
@@ -1162,9 +1182,18 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
 
 /// RV64 patterns
 
-let Predicates = [IsRV64, NotHasStdExtZba] in
+let Predicates = [IsRV64, NotHasStdExtZba] in {
 def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;
+
+// If we're shifting a 32-bit zero extended value left by 0-31 bits, use 2
+// shifts instead of 3. This can occur when unsigned is used to index an array.
+def : Pat<(shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt),
+          (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
+// shl/and can appear in the other order too.
+def : Pat<(SLLIUWPat GPR:$rs1, uimm5:$shamt),
+          (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
+}
 
 let Predicates = [IsRV64] in {
 /// sext and zext
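
The rewrite these patterns perform is sound because, for 0 <= c <= 31,
shifting the zero-extended low 32 bits of x left by c equals moving those
bits to the top with SLLI by 32 and logically shifting right by 32 - c. A
standalone C check (a sketch assuming 64-bit unsigned arithmetic):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    uint64_t x = 0x123456789abcdef0ull;
    for (unsigned c = 0; c < 32; ++c) {
      uint64_t and_then_shift = (x & 0xffffffffull) << c; // 3 insns without Zba
      uint64_t two_shifts     = (x << 32) >> (32 - c);    // SLLI 32; SRLI 32-c
      assert(and_then_shift == two_shifts);
    }
    return 0;
  }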

llvm/lib/Target/RISCV/RISCVInstrInfoB.td

@@ -46,19 +46,6 @@ def shfl_uimm : Operand<XLenVT>, ImmLeaf<XLenVT, [{
 }
 
-// Convert rotl immediate to a rotr immediate for XLen instructions.
-def ImmROTL2R : SDNodeXForm<imm, [{
-  uint64_t XLen = Subtarget->getXLen();
-  return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N),
-                                   N->getValueType(0));
-}]>;
-
-// Convert rotl immediate to a rotr immediate for W instructions.
-def ImmROTL2RW : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
-                                   N->getValueType(0));
-}]>;
-
 // Check that it is a SLOI (Shift Left Ones Immediate).
 def SLOIPat : PatFrag<(ops node:$A, node:$B),
                       (or (shl node:$A, node:$B), imm), [{
@@ -77,13 +64,6 @@ def SROIWPat : PatFrag<(ops node:$A, node:$B),
   return MatchSROIW(N);
 }]>;
 
-// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
-// on RV64).
-def SLLIUWPat : PatFrag<(ops node:$A, node:$B),
-                        (and (shl node:$A, node:$B), imm), [{
-  return MatchSLLIUW(N);
-}]>;
-
 // Checks if this mask has a single 0 bit and cannot be used with ANDI.
 def BCLRMask : ImmLeaf<XLenVT, [{
   if (Subtarget->is64Bit())
@@ -743,7 +723,7 @@ let Predicates = [HasStdExtZbbOrZbp] in {
 def : Pat<(rotr GPR:$rs1, uimmlog2xlen:$shamt),
           (RORI GPR:$rs1, uimmlog2xlen:$shamt)>;
 def : Pat<(rotl GPR:$rs1, uimmlog2xlen:$shamt),
-          (RORI GPR:$rs1, (ImmROTL2R uimmlog2xlen:$shamt))>;
+          (RORI GPR:$rs1, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
 }
 
 def riscv_grevi : SDNode<"RISCVISD::GREVI", SDTIntBinOp, []>;
@@ -829,7 +809,7 @@ def : Pat<(fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
 // We can use FSRI for fshl by immediate if we subtract the immediate from
 // XLen and swap the operands.
 def : Pat<(fshl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
-          (FSRI GPR:$rs1, GPR:$rs3, (ImmROTL2R uimmlog2xlen:$shamt))>;
+          (FSRI GPR:$rs1, GPR:$rs3, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
 } // Predicates = [HasStdExtZbt]
 
 let Predicates = [HasStdExtZbb] in {
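
The FSRI rewrite above relies on the funnel-shift identity
fshl(a, b, n) == fshr(a, b, XLen - n); the operand swap the comment mentions
refers to FSRI's register order. A C check of the identity for XLen = 64 (a
sketch; fshl64/fshr64 are local helpers, and n skips 0 to avoid a shift by
64, which is undefined in C):

  #include <assert.h>
  #include <stdint.h>

  // Funnel shifts over the 128-bit concatenation a:b.
  static uint64_t fshl64(uint64_t a, uint64_t b, unsigned n) {
    return (a << n) | (b >> (64 - n)); // high half of (a:b) << n
  }
  static uint64_t fshr64(uint64_t a, uint64_t b, unsigned n) {
    return (a << (64 - n)) | (b >> n); // low half of (a:b) >> n
  }

  int main(void) {
    uint64_t a = 0xdeadbeefcafef00dull, b = 0x0123456789abcdefull;
    for (unsigned n = 1; n < 64; ++n)
      assert(fshl64(a, b, n) == fshr64(a, b, 64 - n));
    return 0;
  }
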
@@ -971,7 +951,7 @@ def : Pat<(riscv_rorw GPR:$rs1, GPR:$rs2),
 def : Pat<(riscv_rorw GPR:$rs1, uimm5:$rs2),
           (RORIW GPR:$rs1, uimm5:$rs2)>;
 def : Pat<(riscv_rolw GPR:$rs1, uimm5:$rs2),
-          (RORIW GPR:$rs1, (ImmROTL2RW uimm5:$rs2))>;
+          (RORIW GPR:$rs1, (ImmSubFrom32 uimm5:$rs2))>;
 } // Predicates = [HasStdExtZbbOrZbp, IsRV64]
 
 let Predicates = [HasStdExtZbs, IsRV64] in {
@@ -1024,7 +1004,7 @@ def : Pat<(riscv_fsrw GPR:$rs3, GPR:$rs1, GPR:$rs2),
 def : Pat<(riscv_fsrw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
           (FSRIW GPR:$rs1, GPR:$rs3, uimm5:$shamt)>;
 def : Pat<(riscv_fslw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
-          (FSRIW GPR:$rs1, GPR:$rs3, (ImmROTL2RW uimm5:$shamt))>;
+          (FSRIW GPR:$rs1, GPR:$rs3, (ImmSubFrom32 uimm5:$shamt))>;
 } // Predicates = [HasStdExtZbt, IsRV64]
 
 let Predicates = [HasStdExtZbb, IsRV64] in {

llvm/test/CodeGen/RISCV/rv64Zba.ll

@@ -9,11 +9,8 @@
 define i64 @slliuw(i64 %a) nounwind {
 ; RV64I-LABEL: slliuw:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    addi a1, zero, 1
-; RV64I-NEXT:    slli a1, a1, 33
-; RV64I-NEXT:    addi a1, a1, -2
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 31
 ; RV64I-NEXT:    ret
 ;
 ; RV64IB-LABEL: slliuw:
@@ -34,8 +31,7 @@ define i128 @slliuw_2(i32 signext %0, i128* %1) {
 ; RV64I-LABEL: slliuw_2:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    srli a0, a0, 28
 ; RV64I-NEXT:    add a1, a1, a0
 ; RV64I-NEXT:    ld a0, 0(a1)
 ; RV64I-NEXT:    ld a1, 8(a1)
@@ -206,8 +202,7 @@ define signext i16 @sh1adduw(i32 signext %0, i16* %1) {
 ; RV64I-LABEL: sh1adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 31
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lh a0, 0(a0)
 ; RV64I-NEXT:    ret
@@ -232,11 +227,8 @@ define signext i16 @sh1adduw(i32 signext %0, i16* %1) {
 define i64 @sh1adduw_2(i64 %0, i64 %1) {
 ; RV64I-LABEL: sh1adduw_2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 1
-; RV64I-NEXT:    addi a2, zero, 1
-; RV64I-NEXT:    slli a2, a2, 33
-; RV64I-NEXT:    addi a2, a2, -2
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 31
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -259,8 +251,7 @@ define signext i32 @sh2adduw(i32 signext %0, i32* %1) {
 ; RV64I-LABEL: sh2adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    srli a0, a0, 30
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    lw a0, 0(a0)
 ; RV64I-NEXT:    ret
@@ -285,11 +276,8 @@ define signext i32 @sh2adduw(i32 signext %0, i32* %1) {
 define i64 @sh2adduw_2(i64 %0, i64 %1) {
 ; RV64I-LABEL: sh2adduw_2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 2
-; RV64I-NEXT:    addi a2, zero, 1
-; RV64I-NEXT:    slli a2, a2, 34
-; RV64I-NEXT:    addi a2, a2, -4
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 30
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -312,8 +300,7 @@ define i64 @sh3adduw(i32 signext %0, i64* %1) {
 ; RV64I-LABEL: sh3adduw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
-; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    slli a0, a0, 3
+; RV64I-NEXT:    srli a0, a0, 29
 ; RV64I-NEXT:    add a0, a1, a0
 ; RV64I-NEXT:    ld a0, 0(a0)
 ; RV64I-NEXT:    ret
@@ -338,11 +325,8 @@ define i64 @sh3adduw(i32 signext %0, i64* %1) {
 define i64 @sh3adduw_2(i64 %0, i64 %1) {
 ; RV64I-LABEL: sh3adduw_2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slli a0, a0, 3
-; RV64I-NEXT:    addi a2, zero, 1
-; RV64I-NEXT:    slli a2, a2, 35
-; RV64I-NEXT:    addi a2, a2, -8
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 29
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
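
The *_2 tests cover the SLLIUWPat direction, where the shift is performed
before the masking. Taking sh2adduw_2 as an example, the old code built the
mask 0x3fffffffc in three instructions; the new two-shift form is equivalent,
as this standalone C check sketches:

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    uint64_t x = 0xfedcba9876543210ull;
    // Old: (x << 2) & 0x3fffffffc, with the mask materialized separately.
    uint64_t masked = (x << 2) & 0x3fffffffcull;
    // New: SLLI by 32 then SRLI by 30 lands the low 32 bits at bit 2.
    uint64_t shifted = (x << 32) >> 30;
    assert(masked == shifted);
    return 0;
  }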