
[AArch64][GlobalISel] Fix one use check in getTestBitReg

(1) The check needs to be on the 0th operand of whatever we're folding
(2) Checks for validity should happen before we change the bit

Fixes a bug which caused MultiSource/Applications/JM/lencod to fail at -O3.

Differential Revision: https://reviews.llvm.org/D74002
Author: Jessica Paquette
Date:   2020-02-04 15:10:53 -08:00
Parent: 657b413bfa
Commit: bdc86b5318

3 changed files with 55 additions and 5 deletions
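
Why point (2) matters: getTestBitReg rewrites the bit index as it folds through instructions; for example, folding through a left shift by c turns a test of bit b into a test of bit b - c on the shift's input. If a bail-out fires after that rewrite, the caller can end up testing the rewritten bit on the original, unshifted register. A standalone illustration of that hazard (a model of the failure mode the message describes, not LLVM code; the shift amount 2 and bit 3 match the regression test added below):

#include <cstdint>
#include <iostream>

int main() {
  uint64_t x = 0b0010;       // source value; bit 1 is set
  uint64_t shifted = x << 2; // value whose bit 3 the branch actually tests

  bool want   = (shifted >> 3) & 1; // correct: bit 3 of the shifted value -> 1
  bool folded = (x >> 1) & 1;       // sound fold: bit 3 - 2 of the source -> 1
  bool buggy  = (shifted >> 1) & 1; // rewritten bit, original register -> 0

  std::cout << "want=" << want << " folded=" << folded
            << " buggy=" << buggy << "\n";
}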


@@ -996,6 +996,11 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
   assert(Reg.isValid() && "Expected valid register!");
   while (MachineInstr *MI = getDefIgnoringCopies(Reg, MRI)) {
     unsigned Opc = MI->getOpcode();
+
+    if (!MI->getOperand(0).isReg() ||
+        !MRI.hasOneUse(MI->getOperand(0).getReg()))
+      break;
+
     // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
     //
     // (tbz (trunc x), b) -> (tbz x, b) is always safe, because the bit number
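
The trunc half of that comment is easy to sanity-check outside the compiler: truncation keeps the low bits, so any bit index below the narrow width is unchanged. Illustrative only, not part of the patch:

#include <cassert>
#include <cstdint>

int main() {
  // (tbz (trunc x), b) -> (tbz x, b): bit b of the truncated value equals
  // bit b of the source for every b below the narrow width (here 32).
  uint64_t x = 0xDEADBEEFCAFEBABEull;
  for (unsigned b = 0; b < 32; ++b)
    assert(((static_cast<uint32_t>(x) >> b) & 1u) == ((x >> b) & 1));
  return 0;
}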
@@ -1044,8 +1049,8 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
       }
     }
 
-    // Didn't find a constant. Bail out of the loop.
-    if (!C)
+    // Didn't find a constant or viable register. Bail out of the loop.
+    if (!C || !TestReg.isValid())
       break;
 
     // We found a suitable instruction with a constant. Check to see if we can
@@ -1083,8 +1088,8 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
     }
 
     // Check if we found anything worth folding.
-    if (!NextReg.isValid() || !MRI.hasOneUse(NextReg))
-      break;
+    if (!NextReg.isValid())
+      return Reg;
 
     Reg = NextReg;
   }
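
When the walk does fold, it typically rewrites Bit: for the G_SHL case exercised by the regression test at the end of this commit, a test of bit b becomes a test of bit b - c on the shift's input, which is sound whenever b >= c. The arithmetic, checked standalone (a sketch of the invariant only; the opcode handling in getTestBitReg itself is more involved):

#include <cassert>
#include <cstdint>

int main() {
  // (tbz (shl x, c), b) -> (tbz x, b - c) for b >= c: shifting left by c
  // moves bit (b - c) of x up to position b.
  uint64_t x = 0x0123456789ABCDEFull;
  for (unsigned c = 0; c < 8; ++c)
    for (unsigned b = c; b < 64; ++b)
      assert((((x << c) >> b) & 1) == ((x >> (b - c)) & 1));
  return 0;
}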


@@ -114,7 +114,10 @@ body: |
   ; CHECK: %copy:gpr32 = COPY $w0
   ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, %copy, %subreg.sub_32
   ; CHECK: %zext:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
-  ; CHECK: TBNZW %copy, 3, %bb.1
+  ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %zext
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+  ; CHECK: TBNZW [[COPY2]], 3, %bb.1
   ; CHECK: B %bb.0
   ; CHECK: bb.1:
   ; CHECK: $x0 = COPY %zext
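
The fold this test previously performed is still sound in isolation: zero-extension leaves the low 32 bits untouched, so bit 3 of %zext is bit 3 of %copy. What changed is that %zext has a second use (the $x0 copy in bb.1), so the walk now stops at it and the TBNZW tests a 32-bit copy of %zext instead. The soundness half, checked standalone (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  // (tbz (zext x), b) -> (tbz x, b) for b below the source width:
  // zero-extension leaves every low bit unchanged.
  uint32_t x = 0xCAFEBABEu;
  uint64_t z = x; // implicit zero-extension to 64 bits
  for (unsigned b = 0; b < 32; ++b)
    assert(((z >> b) & 1) == ((x >> b) & 1u));
  return 0;
}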


@@ -112,3 +112,45 @@ body: |
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
+...
+---
+name: dont_fold_shl_3
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  ; CHECK-LABEL: name: dont_fold_shl_3
+  ; CHECK: bb.0:
+  ; CHECK: successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK: %copy:gpr64 = COPY $x0
+  ; CHECK: %shl:gpr64 = UBFMXri %copy, 62, 61
+  ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %shl
+  ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]].sub_32
+  ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+  ; CHECK: TBNZW [[COPY2]], 3, %bb.1
+  ; CHECK: B %bb.0
+  ; CHECK: bb.1:
+  ; CHECK: %second_use:gpr64sp = ORRXri %shl, 8000
+  ; CHECK: $x0 = COPY %second_use
+  ; CHECK: RET_ReallyLR implicit $x0
+  bb.0:
+    successors: %bb.0, %bb.1
+    liveins: $x0
+
+    %copy:gpr(s64) = COPY $x0
+    %bit:gpr(s64) = G_CONSTANT i64 8
+    %zero:gpr(s64) = G_CONSTANT i64 0
+    %fold_cst:gpr(s64) = G_CONSTANT i64 2
+
+    ; Don't walk past the G_SHL when it's used more than once.
+    %shl:gpr(s64) = G_SHL %copy, %fold_cst
+    %and:gpr(s64) = G_AND %shl, %bit
+    %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
+    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
+    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BR %bb.0
+
+  bb.1:
+    %second_use:gpr(s64) = G_OR %shl, %bit
+    $x0 = COPY %second_use
+    RET_ReallyLR implicit $x0