diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 42427903892..fcc3e8f781a 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -376,7 +376,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
   const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
 
   if (Segment.getReg() != 0 || !Disp.isImm() || Scale.getImm() > 1 ||
-      !TII->isSafeToClobberEFLAGS(MBB, I))
+      !TII->isSafeToClobberEFLAGS(MBB, I, 10))
     return false;
 
   Register DestReg = MI.getOperand(0).getReg();
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index c753880fc92..b27959ad9bf 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1127,7 +1127,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                  const MachineInstr &Orig,
                                  const TargetRegisterInfo &TRI) const {
   bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
-  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
+  if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I, 10)) {
     // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
     // effects.
     int Value;
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index c345a821716..60b7d51bf9e 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -442,8 +442,9 @@ public:
   /// conservative. If it cannot definitely determine the safety after visiting
   /// a few instructions in each direction it assumes it's not safe.
   bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
-                             MachineBasicBlock::iterator I) const {
-    return MBB.computeRegisterLiveness(&RI, X86::EFLAGS, I, 4) ==
+                             MachineBasicBlock::iterator I,
+                             unsigned Neighborhood = 4) const {
+    return MBB.computeRegisterLiveness(&RI, X86::EFLAGS, I, Neighborhood) ==
            MachineBasicBlock::LQR_Dead;
   }
 
diff --git a/test/CodeGen/X86/optimize-max-0.ll b/test/CodeGen/X86/optimize-max-0.ll
index e7f885625b7..5367f390d1c 100644
--- a/test/CodeGen/X86/optimize-max-0.ll
+++ b/test/CodeGen/X86/optimize-max-0.ll
@@ -85,7 +85,7 @@ define void @foo(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind {
 ; CHECK-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
 ; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT:    leal 2(%esi), %esi
+; CHECK-NEXT:    addl $2, %esi
 ; CHECK-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
 ; CHECK-NEXT:    movl (%esp), %esi ## 4-byte Reload
 ; CHECK-NEXT:    addl %esi, %ecx
@@ -513,7 +513,7 @@ define void @bar(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind {
 ; CHECK-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx ## 4-byte Reload
 ; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT:    leal 2(%edx), %edx
+; CHECK-NEXT:    addl $2, %edx
 ; CHECK-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
 ; CHECK-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Reload
 ; CHECK-NEXT:    addl %edx, %eax
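
Below is a minimal sketch, not part of the patch, of how a backend pass could use the new optional Neighborhood argument of isSafeToClobberEFLAGS shown above. Only the isSafeToClobberEFLAGS signature and the value 10 come from this diff; the helper name isEFLAGSDeadAt and the two-step structure are hypothetical.

    #include "X86InstrInfo.h"
    using namespace llvm;

    // Hypothetical helper: report whether EFLAGS is dead around 'I', so a
    // flag-clobbering rewrite is safe there.  computeRegisterLiveness() stops
    // after scanning 'Neighborhood' instructions in each direction and then
    // returns LQR_Unknown, which isSafeToClobberEFLAGS treats as unsafe, so a
    // wider window gives a definitive answer more often at the cost of a
    // longer scan.
    static bool isEFLAGSDeadAt(const X86InstrInfo *TII, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) {
      // Cheap check first: the default window of 4 instructions.
      if (TII->isSafeToClobberEFLAGS(MBB, I))
        return true;
      // Inconclusive (or live nearby): retry with the 10-instruction window
      // that the two call sites changed in this patch now use.
      return TII->isSafeToClobberEFLAGS(MBB, I, 10);
    }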