
[X86] Prevent folding loads with 64-bit ANDs with immediates that fit in 32 bits.

Prefer to use the 32-bit AND with immediate instead.

Primarily I'm doing this to ensure that immediates created by shrinkAndImmediate will always get absorbed into the AND, but I believe it also reduces the number of uops that need to execute. Ideally we would shrink the 'and' and the 'load' during DAG combine to re-enable the fold.

Fixes PR37063.

llvm-svn: 329667
Craig Topper 2018-04-10 03:44:15 +00:00
parent 68365a2ff5
commit 124ae55576
2 changed files with 43 additions and 1 deletion
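
To make the trade-off concrete, here is a minimal sketch of the affected pattern (the C++ function and the commentary on the instruction sequences are my own illustration, not taken from the commit):

#include <cstdint>
#include <cstdio>

// 0xFFFFFFFE fits in 32 unsigned bits but is not a valid sign-extended imm32,
// so a 64-bit `and` that folds the load must first materialize the mask in a
// register. Keeping the load separate lets isel use the 32-bit form instead
// (32-bit ops implicitly zero the upper half):
//   movq (%rdi), %rax
//   andl $-2, %eax        # the sequence the new test's CHECK lines expect
uint64_t mask_low_bits(const uint64_t *p) {
  return *p & 0xFFFFFFFEu;
}

int main() {
  uint64_t v = 5;
  printf("%llu\n", (unsigned long long)mask_low_bits(&v)); // prints 4
}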

lib/Target/X86/X86ISelDAGToDAG.cpp

@@ -525,10 +525,21 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
       //   addl 4(%esp), %eax
       // The former is 2 bytes shorter. In case where the increment is 1, then
       // the saving can be 4 bytes (by using incl %eax).
-      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
+      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
         if (Imm->getAPIntValue().isSignedIntN(8))
           return false;
+
+        // If this is a 64-bit AND with an immediate that fits in 32-bits,
+        // prefer using the smaller and over folding the load. This is needed to
+        // make sure immediates created by shrinkAndImmediate are always folded.
+        // Ideally we would narrow the load during DAG combine and get the
+        // best of both worlds.
+        if (U->getOpcode() == ISD::AND &&
+            Imm->getAPIntValue().getBitWidth() == 64 &&
+            Imm->getAPIntValue().isIntN(32))
+          return false;
+      }
 
       // If the other operand is a TLS address, we should fold it instead.
       // This produces
       //   movl %gs:0, %eax
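
Both early-outs key off llvm::APInt predicates: isSignedIntN(8) catches immediates that already encode as a sign-extended imm8, and the new isIntN(32) check catches 64-bit masks whose upper half is all zero. A standalone sketch of the classification (the profitableToFoldLoad helper and the sample values are hypothetical; only the APInt calls come from the patch):

#include "llvm/ADT/APInt.h"
#include <cstdio>

// Mirrors the two bail-outs in IsProfitableToFold for an AND user.
static bool profitableToFoldLoad(const llvm::APInt &Imm) {
  // Existing rule: a sign-extended 8-bit immediate is already cheap.
  if (Imm.isSignedIntN(8))
    return false;
  // New rule: a 64-bit mask with a zero upper half can use a 32-bit andl,
  // so keep the load separate instead of folding it.
  if (Imm.getBitWidth() == 64 && Imm.isIntN(32))
    return false;
  return true;
}

int main() {
  llvm::APInt Small(64, 6);              // imm8: was already never folded
  llvm::APInt Shrunk(64, 0xFFFFFFFEULL); // shrinkAndImmediate-style mask
  llvm::APInt Wide(64, 0x1FFFFFFFFULL);  // needs a true 64-bit immediate
  printf("%d %d %d\n", (int)profitableToFoldLoad(Small),
         (int)profitableToFoldLoad(Shrunk),
         (int)profitableToFoldLoad(Wide)); // prints: 0 0 1
}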


@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+declare void @bar()
+
+define void @foo(i64*) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0: # %start
+; CHECK-NEXT:    movq (%rdi), %rax
+; CHECK-NEXT:    andl $-2, %eax
+; CHECK-NEXT:    cmpq $4, %rax
+; CHECK-NEXT:    jne .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %bb1
+; CHECK-NEXT:    retq
+; CHECK-NEXT:  .LBB0_2: # %bb2.i
+; CHECK-NEXT:    jmp bar # TAILCALL
+start:
+  %1 = load i64, i64* %0, align 8, !range !0
+  %2 = and i64 %1, 6
+  %3 = icmp eq i64 %2, 4
+  br i1 %3, label %bb1, label %bb2.i
+
+bb1:                                              ; preds = %bb2.i, %start
+  ret void
+
+bb2.i:                                            ; preds = %start
+  tail call fastcc void @bar()
+  br label %bb1
+}
+
+!0 = !{i64 0, i64 6}
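
The !range metadata is what makes the andl $-2 mask legal here: it bounds the loaded value to [0, 6), so bits 3..63 are known zero and the mask 6 can be widened to 0xFFFFFFFE without changing any observable result, which is the shrinkAndImmediate rewrite this patch protects. A quick standalone check of that equivalence (my own illustration, not part of the test):

#include <cassert>
#include <cstdint>

int main() {
  // !0 = !{i64 0, i64 6} bounds the loaded value to [0, 6), so masking with
  // 6 and masking with 0xFFFFFFFE (the andl $-2 form) agree on every value
  // the load can actually produce.
  for (uint64_t x = 0; x < 6; ++x)
    assert((x & 6) == (x & 0xFFFFFFFEull));
  return 0;
}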