llvm-mirror/test/CodeGen/Mips/atomicCmpSwapPW.ll
Francis Visoiu Mistrih f191e5a702 [FastISel] Skip creating unnecessary vregs for arguments
This behavior was added in r130928 for both FastISel and SelectionDAG,
and then disabled in r131156 for FastISel.

This re-enables it for FastISel with the corresponding fix.

This is triggered only when FastISel can't lower the arguments and
falls back to SelectionDAG to lower them.

FastISel maintains a map of "register fixups": at the end of the
selection phase, every use of a register is replaced with another
register that FastISel sometimes pre-assigned. Code at the end of
SelectionDAGISel::runOnMachineFunction performs this replacement at the
very end of the function, but other code that runs before it walks the
MachineFunction and assumes the fixups have already been applied. In
this case, the real issue is that the code emitting COPY instructions
for the live-ins (physreg to vreg), EmitLiveInCopies, checks whether the
vreg assigned to the physreg is used, and skips the COPY if it is not.
If a register hasn't been replaced with its assigned fixup yet, the copy
is skipped and we end up with uses of undefined registers.

This fix moves the replacement of registers before the emission of
copies for the live-ins.
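
A self-contained toy model of the ordering problem is sketched below.
It is a hand-written illustration, not LLVM's actual code: only the
names RegFixups and EmitLiveInCopies mirror the real sources, and
everything else is simplified.

  #include <cstdio>
  #include <map>
  #include <set>

  // Vregs are modeled as plain unsigned ints.
  using Reg = unsigned;

  // FastISel-style fixup map: every use of the key must be rewritten
  // into a use of the value.
  std::map<Reg, Reg> RegFixups = {{2, 1}};

  // Which vregs appear as operands in the function. Uses of %2 have
  // not been rewritten to %1 yet.
  std::set<Reg> Uses = {2};

  // Stand-in for EmitLiveInCopies: emit the physreg->vreg COPY only if
  // the vreg assigned to the live-in appears to be used.
  void emitLiveInCopy(Reg LiveInVReg) {
    if (Uses.count(LiveInVReg))
      std::printf("COPY physreg -> %%%u\n", LiveInVReg);
    else
      std::printf("skipping COPY for %%%u: looks dead\n", LiveInVReg);
  }

  // Apply the register fixups: rewrite every recorded use.
  void applyFixups() {
    for (auto [From, To] : RegFixups)
      if (Uses.erase(From))
        Uses.insert(To);
  }

  int main() {
    // Old order: the live-in check runs first. %1 looks dead because
    // its only use is still recorded against %2, so the COPY is
    // dropped and %1 ends up undefined after the late fixup renames
    // %2 to %1.
    emitLiveInCopy(1);

    // Fixed order: apply the fixups first, then emit live-in copies.
    applyFixups();
    emitLiveInCopy(1); // now emits the COPY
  }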

The initial motivation for this fix is to enable tail calls for
swiftself functions. These were blocked because we couldn't prove that
the swiftself argument (which is passed in a callee-saved register)
comes from a function argument (live-in): an extra vreg-to-vreg copy got
in the way.
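
As a hypothetical illustration (not taken from the patch), this is the
kind of IR that can now be tail-called: %self is forwarded unchanged, so
once the extra vreg-to-vreg copy is gone the backend can prove it is the
incoming swiftself live-in.

  define swiftcc i8* @callee(i8* swiftself %self) {
    ret i8* %self
  }

  define swiftcc i8* @caller(i8* swiftself %self) {
    ; The swiftself argument is passed through as-is, so the call can
    ; become a tail call instead of saving and restoring the
    ; callee-saved swiftself register around it.
    %r = tail call swiftcc i8* @callee(i8* swiftself %self)
    ret i8* %r
  }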

A few tests are affected by this:

* llvm/test/CodeGen/AArch64/swifterror.ll: we used to spill x21
(callee-save) but never reload it because it's attached to the return.
We now don't even spill it anymore.
* llvm/test/CodeGen/*/swiftself.ll: we tail-call now.
* llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll: I believe this
test was not really testing the right thing, but it worked because the
same registers were re-used.
* llvm/test/CodeGen/ARM/cmpxchg-O0.ll: regalloc changes
* llvm/test/CodeGen/ARM/swifterror.ll: get rid of a copy
* llvm/test/CodeGen/Mips/*: get rid of spills and copies
* llvm/test/CodeGen/SystemZ/swift-return.ll: smaller stack
* llvm/test/CodeGen/X86/atomic-unordered.ll: smaller stack
* llvm/test/CodeGen/X86/swifterror.ll: same as AArch64
* llvm/test/DebugInfo/X86/dbg-declare-arg.ll: stack size changed

Differential Revision: https://reviews.llvm.org/D62361

llvm-svn: 362963
2019-06-10 16:53:37 +00:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O0 -mtriple=mipsel-unknown-linux-gnu -mcpu=mips32r2 -target-abi=o32 < %s -filetype=asm -o - \
; RUN: | FileCheck -check-prefixes=O32 %s
; RUN: llc -O0 -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 -target-abi=n32 < %s -filetype=asm -o - \
; RUN: | FileCheck -check-prefixes=N32 %s
; RUN: llc -O0 -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 -target-abi=n64 < %s -filetype=asm -o - \
; RUN: | FileCheck -check-prefixes=N64 %s
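; To regenerate the CHECK lines after a codegen change, rerun the script
; mentioned above on this file, e.g. (paths assume an LLVM checkout):
;   llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll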
@sym = external global i32*
define void @foo(i32 %new, i32 %old) {
; O32-LABEL: foo:
; O32: # %bb.0: # %entry
; O32-NEXT: lui $1, %hi(sym)
; O32-NEXT: lw $1, %lo(sym)($1)
; O32-NEXT: sync
; O32-NEXT: $BB0_1: # %entry
; O32-NEXT: # =>This Inner Loop Header: Depth=1
; O32-NEXT: ll $2, 0($1)
; O32-NEXT: bne $2, $4, $BB0_3
; O32-NEXT: nop
; O32-NEXT: # %bb.2: # %entry
; O32-NEXT: # in Loop: Header=BB0_1 Depth=1
; O32-NEXT: move $3, $5
; O32-NEXT: sc $3, 0($1)
; O32-NEXT: beqz $3, $BB0_1
; O32-NEXT: nop
; O32-NEXT: $BB0_3: # %entry
; O32-NEXT: sync
; O32-NEXT: jr $ra
; O32-NEXT: nop
;
; N32-LABEL: foo:
; N32: # %bb.0: # %entry
; N32-NEXT: # kill: def $a1 killed $a1 killed $a1_64
; N32-NEXT: sll $1, $5, 0
; N32-NEXT: # kill: def $a0 killed $a0 killed $a0_64
; N32-NEXT: sll $2, $4, 0
; N32-NEXT: lui $3, %hi(sym)
; N32-NEXT: lw $3, %lo(sym)($3)
; N32-NEXT: sync
; N32-NEXT: .LBB0_1: # %entry
; N32-NEXT: # =>This Inner Loop Header: Depth=1
; N32-NEXT: ll $4, 0($3)
; N32-NEXT: bne $4, $2, .LBB0_3
; N32-NEXT: nop
; N32-NEXT: # %bb.2: # %entry
; N32-NEXT: # in Loop: Header=BB0_1 Depth=1
; N32-NEXT: move $5, $1
; N32-NEXT: sc $5, 0($3)
; N32-NEXT: beqz $5, .LBB0_1
; N32-NEXT: nop
; N32-NEXT: .LBB0_3: # %entry
; N32-NEXT: sync
; N32-NEXT: jr $ra
; N32-NEXT: nop
;
; N64-LABEL: foo:
; N64: # %bb.0: # %entry
; N64-NEXT: # kill: def $a1 killed $a1 killed $a1_64
; N64-NEXT: sll $1, $5, 0
; N64-NEXT: # kill: def $a0 killed $a0 killed $a0_64
; N64-NEXT: sll $2, $4, 0
; N64-NEXT: lui $3, %highest(sym)
; N64-NEXT: daddiu $3, $3, %higher(sym)
; N64-NEXT: dsll $3, $3, 16
; N64-NEXT: daddiu $3, $3, %hi(sym)
; N64-NEXT: dsll $3, $3, 16
; N64-NEXT: ld $3, %lo(sym)($3)
; N64-NEXT: sync
; N64-NEXT: .LBB0_1: # %entry
; N64-NEXT: # =>This Inner Loop Header: Depth=1
; N64-NEXT: ll $4, 0($3)
; N64-NEXT: bne $4, $2, .LBB0_3
; N64-NEXT: nop
; N64-NEXT: # %bb.2: # %entry
; N64-NEXT: # in Loop: Header=BB0_1 Depth=1
; N64-NEXT: move $5, $1
; N64-NEXT: sc $5, 0($3)
; N64-NEXT: beqz $5, .LBB0_1
; N64-NEXT: nop
; N64-NEXT: .LBB0_3: # %entry
; N64-NEXT: sync
; N64-NEXT: jr $ra
; N64-NEXT: nop
entry:
  %0 = load i32*, i32** @sym
  cmpxchg i32* %0, i32 %new, i32 %old seq_cst seq_cst
  ret void
}