; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=mips64el-unknown-linux-gnu --disable-machine-licm -mcpu=mips4 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS4
; RUN: llc -mtriple=mips64el-unknown-linux-gnu --disable-machine-licm -mcpu=mips64 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS64
; RUN: llc -mtriple=mips64el-unknown-linux-gnu --disable-machine-licm -mcpu=mips64r2 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS64R2
; RUN: llc -mtriple=mips64el-unknown-linux-gnu --disable-machine-licm -mcpu=mips64r6 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS64R6
; RUN: llc -mtriple=mips64-unknown-linux-gnu -O0 -mcpu=mips64r6 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS64R6O0
; We want to verify that the produced code is well formed at all optimization
; levels; the rest of the test ensures correctness.
; RUN: llc -mtriple=mips64el-unknown-linux-gnu -O1 --disable-machine-licm -mcpu=mips64 -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s --check-prefix=O1
; RUN: llc -mtriple=mips64el-unknown-linux-gnu -O2 --disable-machine-licm -mcpu=mips64 -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s --check-prefix=O2
; RUN: llc -mtriple=mips64el-unknown-linux-gnu -O3 --disable-machine-licm -mcpu=mips64 -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s --check-prefix=O3
; Keep one big-endian check so that we don't reduce testing, but don't add more
; since endianness doesn't affect the body of the atomic operations.
; RUN: llc -mtriple=mips64-unknown-linux-gnu --disable-machine-licm -mcpu=mips64 -relocation-model=pic -verify-machineinstrs < %s | \
; RUN: FileCheck %s -check-prefix=MIPS64EB
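
; The RUN lines above compile the same IR for several ISA revisions (MIPS4
; through MIPS64R6) and, for plain mips64, at -O0 through -O3, plus one
; big-endian run. A minimal sketch of how the CHECK bodies are regenerated
; after a codegen change (assuming an in-tree llc is built and on PATH):
;
;   utils/update_llc_test_checks.py llvm/test/CodeGen/Mips/atomic64.ll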

@x = common global i64 0, align 4

define i64 @AtomicLoadAdd(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadAdd:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB0_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: daddu $3, $2, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB0_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadAdd:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB0_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: daddu $3, $2, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB0_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadAdd:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB0_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: daddu $3, $2, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB0_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadAdd:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB0_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: daddu $3, $2, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB0_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadAdd:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB0_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: daddu $3, $2, $4
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB0_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadAdd:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB0_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: daddu $3, $2, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB0_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadAdd:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB0_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: daddu $3, $2, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB0_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadAdd:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB0_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: daddu $3, $2, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB0_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadAdd:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB0_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: daddu $3, $2, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB0_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw add i64* @x, i64 %incr monotonic
ret i64 %0
}
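
; Every prefix above lowers the monotonic atomicrmw add to the same lld/scd
; retry loop; MIPS64R6 merely uses the compact beqzc/jrc encodings. A rough
; C11 equivalent of what this IR requests (an illustrative sketch, not part
; of the test; the IR's monotonic ordering corresponds to memory_order_relaxed):
;
;   #include <stdatomic.h>
;   extern _Atomic long long x;
;   long long AtomicLoadAdd(long long incr) {
;     return atomic_fetch_add_explicit(&x, incr, memory_order_relaxed);
;   }
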
define i64 @AtomicLoadSub(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadSub:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB1_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: dsubu $3, $2, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB1_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadSub:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB1_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: dsubu $3, $2, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB1_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadSub:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB1_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: dsubu $3, $2, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB1_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadSub:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB1_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: dsubu $3, $2, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB1_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadSub:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB1_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: dsubu $3, $2, $4
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB1_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadSub:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB1_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: dsubu $3, $2, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB1_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadSub:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB1_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: dsubu $3, $2, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB1_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadSub:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB1_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: dsubu $3, $2, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB1_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadSub:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB1_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: dsubu $3, $2, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB1_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw sub i64* @x, i64 %incr monotonic
ret i64 %0
}
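
; The sub loop is identical except that the new value comes from dsubu.
; Annotated anatomy of the retry loop, using the register roles from the
; checks above (a sketch; the label and block number vary per function):
;
;   .LBBn_1:
;     lld   $2, 0($1)    # load-linked: read the current value, open a reservation
;     dsubu $3, $2, $4   # new = old - incr
;     scd   $3, 0($1)    # store-conditional: $3 becomes 1 on success, 0 on failure
;     beqz  $3, .LBBn_1  # reservation lost to another writer, so retry
;
; $2 still holds the old value afterwards, which is the atomicrmw result.
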
define i64 @AtomicLoadAnd(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadAnd:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB2_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: and $3, $2, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB2_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadAnd:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB2_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: and $3, $2, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB2_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadAnd:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB2_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: and $3, $2, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB2_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadAnd:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB2_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: and $3, $2, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB2_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadAnd:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB2_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: and $3, $2, $4
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB2_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadAnd:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB2_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: and $3, $2, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB2_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadAnd:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB2_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: and $3, $2, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB2_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadAnd:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB2_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: and $3, $2, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB2_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadAnd:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB2_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: and $3, $2, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB2_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw and i64* @x, i64 %incr monotonic
ret i64 %0
}
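
; AtomicLoadAnd above and the or/xor/nand variants below differ from the
; add loop only in the ALU step between lld and scd. The mapping this file
; exercises:
;
;   atomicrmw and  ->  and $3, $2, $4
;   atomicrmw or   ->  or  $3, $2, $4
;   atomicrmw xor  ->  xor $3, $2, $4
;   atomicrmw nand ->  and $3, $2, $4 followed by nor $3, $zero, $3
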
define i64 @AtomicLoadOr(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadOr:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB3_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: or $3, $2, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB3_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadOr:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB3_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: or $3, $2, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB3_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadOr:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB3_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: or $3, $2, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB3_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadOr:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB3_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: or $3, $2, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB3_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadOr:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB3_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: or $3, $2, $4
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB3_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadOr:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB3_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: or $3, $2, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB3_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadOr:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB3_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: or $3, $2, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB3_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadOr:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB3_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: or $3, $2, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB3_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadOr:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB3_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: or $3, $2, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB3_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw or i64* @x, i64 %incr monotonic
ret i64 %0
}

define i64 @AtomicLoadXor(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadXor:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB4_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: xor $3, $2, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB4_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadXor:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB4_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: xor $3, $2, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB4_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadXor:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB4_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: xor $3, $2, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB4_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadXor:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB4_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: xor $3, $2, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB4_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadXor:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB4_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: xor $3, $2, $4
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB4_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadXor:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB4_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: xor $3, $2, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB4_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadXor:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB4_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: xor $3, $2, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB4_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadXor:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB4_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: xor $3, $2, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB4_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadXor:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB4_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: xor $3, $2, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB4_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw xor i64* @x, i64 %incr monotonic
ret i64 %0
}

define i64 @AtomicLoadNand(i64 signext %incr) nounwind {
; MIPS4-LABEL: AtomicLoadNand:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB5_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: and $3, $2, $4
; MIPS4-NEXT: nor $3, $zero, $3
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB5_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: nop
;
; MIPS64-LABEL: AtomicLoadNand:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB5_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: and $3, $2, $4
; MIPS64-NEXT: nor $3, $zero, $3
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB5_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: nop
;
; MIPS64R2-LABEL: AtomicLoadNand:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB5_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: and $3, $2, $4
; MIPS64R2-NEXT: nor $3, $zero, $3
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB5_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: nop
;
; MIPS64R6-LABEL: AtomicLoadNand:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB5_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: and $3, $2, $4
; MIPS64R6-NEXT: nor $3, $zero, $3
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB5_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jrc $ra
;
; MIPS64R6O0-LABEL: AtomicLoadNand:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB5_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $2, 0($1)
; MIPS64R6O0-NEXT: and $3, $2, $4
; MIPS64R6O0-NEXT: nor $3, $zero, $3
; MIPS64R6O0-NEXT: scd $3, 0($1)
; MIPS64R6O0-NEXT: beqzc $3, .LBB5_1
; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadNand:
; O1: # %bb.0: # %entry
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB5_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: and $3, $2, $4
; O1-NEXT: nor $3, $zero, $3
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB5_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: nop
;
; O2-LABEL: AtomicLoadNand:
; O2: # %bb.0: # %entry
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB5_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: and $3, $2, $4
; O2-NEXT: nor $3, $zero, $3
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB5_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: nop
;
; O3-LABEL: AtomicLoadNand:
; O3: # %bb.0: # %entry
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB5_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: and $3, $2, $4
; O3-NEXT: nor $3, $zero, $3
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB5_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: nop
;
; MIPS64EB-LABEL: AtomicLoadNand:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB5_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: and $3, $2, $4
; MIPS64EB-NEXT: nor $3, $zero, $3
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB5_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: nop
entry:
%0 = atomicrmw nand i64* @x, i64 %incr monotonic
ret i64 %0
}
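
; MIPS has no native nand instruction, so the update is synthesized as and
; followed by nor against $zero. C11 likewise lacks atomic_fetch_nand; a
; reference implementation would be a compare-exchange loop (illustrative
; sketch only, not part of the test):
;
;   #include <stdatomic.h>
;   long long nand_rmw(_Atomic long long *p, long long v) {
;     long long old = atomic_load_explicit(p, memory_order_relaxed);
;     long long des;
;     do { des = ~(old & v); }
;     while (!atomic_compare_exchange_weak_explicit(
;                p, &old, des, memory_order_relaxed, memory_order_relaxed));
;     return old;   /* atomicrmw returns the pre-update value */
;   }
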
define i64 @AtomicSwap64(i64 signext %newval) nounwind {
; MIPS4-LABEL: AtomicSwap64:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: daddiu $sp, $sp, -16
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS4-NEXT: sd $4, 8($sp)
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB6_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: move $3, $4
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB6_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: daddiu $sp, $sp, 16
;
; MIPS64-LABEL: AtomicSwap64:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: daddiu $sp, $sp, -16
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS64-NEXT: sd $4, 8($sp)
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB6_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: move $3, $4
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB6_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R2-LABEL: AtomicSwap64:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: daddiu $sp, $sp, -16
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R2-NEXT: sd $4, 8($sp)
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB6_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: move $3, $4
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB6_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R6-LABEL: AtomicSwap64:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: daddiu $sp, $sp, -16
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R6-NEXT: sd $4, 8($sp)
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB6_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: move $3, $4
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB6_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: jr $ra
; MIPS64R6-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R6O0-LABEL: AtomicSwap64:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R6O0-NEXT: sd $4, 8($sp)
; MIPS64R6O0-NEXT: ld $2, 8($sp)
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB6_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $3, 0($1)
; MIPS64R6O0-NEXT: move $4, $2
; MIPS64R6O0-NEXT: scd $4, 0($1)
; MIPS64R6O0-NEXT: beqzc $4, .LBB6_1
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: move $2, $3
; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicSwap64:
; O1: # %bb.0: # %entry
; O1-NEXT: daddiu $sp, $sp, -16
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; O1-NEXT: sd $4, 8($sp)
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB6_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: move $3, $4
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB6_1
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: daddiu $sp, $sp, 16
;
; O2-LABEL: AtomicSwap64:
; O2: # %bb.0: # %entry
; O2-NEXT: daddiu $sp, $sp, -16
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; O2-NEXT: sd $4, 8($sp)
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB6_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: move $3, $4
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB6_1
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: daddiu $sp, $sp, 16
;
; O3-LABEL: AtomicSwap64:
; O3: # %bb.0: # %entry
; O3-NEXT: daddiu $sp, $sp, -16
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; O3-NEXT: sd $4, 8($sp)
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB6_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: move $3, $4
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB6_1
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: daddiu $sp, $sp, 16
;
; MIPS64EB-LABEL: AtomicSwap64:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: daddiu $sp, $sp, -16
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
; MIPS64EB-NEXT: sd $4, 8($sp)
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB6_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: move $3, $4
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB6_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
%newval.addr = alloca i64, align 4
store i64 %newval, i64* %newval.addr, align 4
%tmp = load i64, i64* %newval.addr, align 4
%0 = atomicrmw xchg i64* @x, i64 %tmp monotonic
ret i64 %0
}
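
; Check that a monotonic i64 cmpxchg is expanded to an LLD/SCD loop that
; exits early (to .LBB7_3) when the loaded value does not match the
; expected value, so the store-conditional is only attempted on a match.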
define i64 @AtomicCmpSwap64(i64 signext %oldval, i64 signext %newval) nounwind {
; MIPS4-LABEL: AtomicCmpSwap64:
; MIPS4: # %bb.0: # %entry
; MIPS4-NEXT: daddiu $sp, $sp, -16
; MIPS4-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS4-NEXT: daddu $1, $1, $25
; MIPS4-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS4-NEXT: sd $5, 8($sp)
; MIPS4-NEXT: ld $1, %got_disp(x)($1)
; MIPS4-NEXT: .LBB7_1: # %entry
; MIPS4-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS4-NEXT: lld $2, 0($1)
; MIPS4-NEXT: bne $2, $4, .LBB7_3
; MIPS4-NEXT: nop
; MIPS4-NEXT: # %bb.2: # %entry
; MIPS4-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS4-NEXT: move $3, $5
; MIPS4-NEXT: scd $3, 0($1)
; MIPS4-NEXT: beqz $3, .LBB7_1
; MIPS4-NEXT: nop
; MIPS4-NEXT: .LBB7_3: # %entry
; MIPS4-NEXT: jr $ra
; MIPS4-NEXT: daddiu $sp, $sp, 16
;
; MIPS64-LABEL: AtomicCmpSwap64:
; MIPS64: # %bb.0: # %entry
; MIPS64-NEXT: daddiu $sp, $sp, -16
; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64-NEXT: daddu $1, $1, $25
; MIPS64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64-NEXT: sd $5, 8($sp)
; MIPS64-NEXT: ld $1, %got_disp(x)($1)
; MIPS64-NEXT: .LBB7_1: # %entry
; MIPS64-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64-NEXT: lld $2, 0($1)
; MIPS64-NEXT: bne $2, $4, .LBB7_3
; MIPS64-NEXT: nop
; MIPS64-NEXT: # %bb.2: # %entry
; MIPS64-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS64-NEXT: move $3, $5
; MIPS64-NEXT: scd $3, 0($1)
; MIPS64-NEXT: beqz $3, .LBB7_1
; MIPS64-NEXT: nop
; MIPS64-NEXT: .LBB7_3: # %entry
; MIPS64-NEXT: jr $ra
; MIPS64-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R2-LABEL: AtomicCmpSwap64:
; MIPS64R2: # %bb.0: # %entry
; MIPS64R2-NEXT: daddiu $sp, $sp, -16
; MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R2-NEXT: daddu $1, $1, $25
; MIPS64R2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R2-NEXT: sd $5, 8($sp)
; MIPS64R2-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R2-NEXT: .LBB7_1: # %entry
; MIPS64R2-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R2-NEXT: lld $2, 0($1)
; MIPS64R2-NEXT: bne $2, $4, .LBB7_3
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: # %bb.2: # %entry
; MIPS64R2-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS64R2-NEXT: move $3, $5
; MIPS64R2-NEXT: scd $3, 0($1)
; MIPS64R2-NEXT: beqz $3, .LBB7_1
; MIPS64R2-NEXT: nop
; MIPS64R2-NEXT: .LBB7_3: # %entry
; MIPS64R2-NEXT: jr $ra
; MIPS64R2-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R6-LABEL: AtomicCmpSwap64:
; MIPS64R6: # %bb.0: # %entry
; MIPS64R6-NEXT: daddiu $sp, $sp, -16
; MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R6-NEXT: daddu $1, $1, $25
; MIPS64R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R6-NEXT: sd $5, 8($sp)
; MIPS64R6-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6-NEXT: .LBB7_1: # %entry
; MIPS64R6-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6-NEXT: lld $2, 0($1)
; MIPS64R6-NEXT: bnec $2, $4, .LBB7_3
; MIPS64R6-NEXT: # %bb.2: # %entry
; MIPS64R6-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS64R6-NEXT: move $3, $5
; MIPS64R6-NEXT: scd $3, 0($1)
; MIPS64R6-NEXT: beqzc $3, .LBB7_1
; MIPS64R6-NEXT: nop
; MIPS64R6-NEXT: .LBB7_3: # %entry
; MIPS64R6-NEXT: jr $ra
; MIPS64R6-NEXT: daddiu $sp, $sp, 16
;
; MIPS64R6O0-LABEL: AtomicCmpSwap64:
; MIPS64R6O0: # %bb.0: # %entry
; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R6O0-NEXT: sd $5, 8($sp)
; MIPS64R6O0-NEXT: ld $2, 8($sp)
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB7_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64R6O0-NEXT: lld $3, 0($1)
; MIPS64R6O0-NEXT: bnec $3, $4, .LBB7_3
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS64R6O0-NEXT: move $5, $2
; MIPS64R6O0-NEXT: scd $5, 0($1)
; MIPS64R6O0-NEXT: beqzc $5, .LBB7_1
; MIPS64R6O0-NEXT: .LBB7_3: # %entry
; MIPS64R6O0-NEXT: move $2, $3
; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicCmpSwap64:
; O1: # %bb.0: # %entry
; O1-NEXT: daddiu $sp, $sp, -16
; O1-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; O1-NEXT: daddu $1, $1, $25
; O1-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; O1-NEXT: sd $5, 8($sp)
; O1-NEXT: ld $1, %got_disp(x)($1)
; O1-NEXT: .LBB7_1: # %entry
; O1-NEXT: # =>This Inner Loop Header: Depth=1
; O1-NEXT: lld $2, 0($1)
; O1-NEXT: bne $2, $4, .LBB7_3
; O1-NEXT: nop
; O1-NEXT: # %bb.2: # %entry
; O1-NEXT: # in Loop: Header=BB7_1 Depth=1
; O1-NEXT: move $3, $5
; O1-NEXT: scd $3, 0($1)
; O1-NEXT: beqz $3, .LBB7_1
; O1-NEXT: nop
; O1-NEXT: .LBB7_3: # %entry
; O1-NEXT: jr $ra
; O1-NEXT: daddiu $sp, $sp, 16
;
; O2-LABEL: AtomicCmpSwap64:
; O2: # %bb.0: # %entry
; O2-NEXT: daddiu $sp, $sp, -16
; O2-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; O2-NEXT: daddu $1, $1, $25
; O2-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; O2-NEXT: sd $5, 8($sp)
; O2-NEXT: ld $1, %got_disp(x)($1)
; O2-NEXT: .LBB7_1: # %entry
; O2-NEXT: # =>This Inner Loop Header: Depth=1
; O2-NEXT: lld $2, 0($1)
; O2-NEXT: bne $2, $4, .LBB7_3
; O2-NEXT: nop
; O2-NEXT: # %bb.2: # %entry
; O2-NEXT: # in Loop: Header=BB7_1 Depth=1
; O2-NEXT: move $3, $5
; O2-NEXT: scd $3, 0($1)
; O2-NEXT: beqz $3, .LBB7_1
; O2-NEXT: nop
; O2-NEXT: .LBB7_3: # %entry
; O2-NEXT: jr $ra
; O2-NEXT: daddiu $sp, $sp, 16
;
; O3-LABEL: AtomicCmpSwap64:
; O3: # %bb.0: # %entry
; O3-NEXT: daddiu $sp, $sp, -16
; O3-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; O3-NEXT: sd $5, 8($sp)
; O3-NEXT: daddu $1, $1, $25
; O3-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; O3-NEXT: ld $1, %got_disp(x)($1)
; O3-NEXT: .LBB7_1: # %entry
; O3-NEXT: # =>This Inner Loop Header: Depth=1
; O3-NEXT: lld $2, 0($1)
; O3-NEXT: bne $2, $4, .LBB7_3
; O3-NEXT: nop
; O3-NEXT: # %bb.2: # %entry
; O3-NEXT: # in Loop: Header=BB7_1 Depth=1
; O3-NEXT: move $3, $5
; O3-NEXT: scd $3, 0($1)
; O3-NEXT: beqz $3, .LBB7_1
; O3-NEXT: nop
; O3-NEXT: .LBB7_3: # %entry
; O3-NEXT: jr $ra
; O3-NEXT: daddiu $sp, $sp, 16
;
; MIPS64EB-LABEL: AtomicCmpSwap64:
; MIPS64EB: # %bb.0: # %entry
; MIPS64EB-NEXT: daddiu $sp, $sp, -16
; MIPS64EB-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64EB-NEXT: daddu $1, $1, $25
; MIPS64EB-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64EB-NEXT: sd $5, 8($sp)
; MIPS64EB-NEXT: ld $1, %got_disp(x)($1)
; MIPS64EB-NEXT: .LBB7_1: # %entry
; MIPS64EB-NEXT: # =>This Inner Loop Header: Depth=1
; MIPS64EB-NEXT: lld $2, 0($1)
; MIPS64EB-NEXT: bne $2, $4, .LBB7_3
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: # %bb.2: # %entry
; MIPS64EB-NEXT: # in Loop: Header=BB7_1 Depth=1
; MIPS64EB-NEXT: move $3, $5
; MIPS64EB-NEXT: scd $3, 0($1)
; MIPS64EB-NEXT: beqz $3, .LBB7_1
; MIPS64EB-NEXT: nop
; MIPS64EB-NEXT: .LBB7_3: # %entry
; MIPS64EB-NEXT: jr $ra
; MIPS64EB-NEXT: daddiu $sp, $sp, 16
entry:
%newval.addr = alloca i64, align 4
store i64 %newval, i64* %newval.addr, align 4
%tmp = load i64, i64* %newval.addr, align 4
%0 = cmpxchg i64* @x, i64 %oldval, i64 %tmp monotonic monotonic
%1 = extractvalue { i64, i1 } %0, 0
ret i64 %1
}