llvm-mirror/test/CodeGen/X86/fold-vector-sext-crash2.ll
Simon Pilgrim d654e7d40c [X86] Handle COPYs of physregs better (regalloc hints)
Enable enableMultipleCopyHints() on X86.

Original Patch by @jonpa:

While enabling the machine scheduler for SystemZ, it was discovered that a test needed one extra, seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). Handling that case resulted in this patch, which improves register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12500 fewer register moves on SPEC, as well as marginally less spilling.

Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This causes a lot of test failures, but since this should be a general improvement I hope that the involved targets will help review the test updates.

Differential Revision: https://reviews.llvm.org/D38128

llvm-svn: 342578
2018-09-19 18:59:08 +00:00
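
A rough sketch of the mechanism, for readers unfamiliar with the patch: instead of recording a single preferred register, the spill-weight calculation can collect every copy-related register together with an accumulated weight and hand the allocator the whole list, heaviest hint first. The C++ below is a hypothetical illustration only; all names are invented and none of it is the actual LLVM implementation.

#include <algorithm>
#include <vector>

struct CopyHint {
  unsigned Reg;  // the register at the other end of a COPY
  float Weight;  // accumulated frequency of the copies feeding this hint
};

// Return the hinted registers ordered by decreasing copy weight, so the
// allocator tries the most profitable coalescing candidate first instead
// of seeing only a single "best" hint.
std::vector<unsigned> sortedCopyHints(std::vector<CopyHint> Hints) {
  std::sort(Hints.begin(), Hints.end(),
            [](const CopyHint &A, const CopyHint &B) {
              return A.Weight > B.Weight;  // heaviest first
            });
  std::vector<unsigned> Order;
  Order.reserve(Hints.size());
  for (const CopyHint &H : Hints)
    Order.push_back(H.Reg);
  return Order;
}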


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s -check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=X64
; DAGCombiner used to crash during sext/zext constant folding of these patterns
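; test_sext1: sign-extend <i8 -100, i8 -99> to <2 x i256>, then shuffle with
; mask <1,3> so the result is <0, sext(-99)>. Element 0 (offsets 0..31) is all
; zero; element 1 (offsets 32..63) holds -99 in its lowest chunk and all-ones
; ($-1) in every chunk above it, which is exactly what the stores below check.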
define <2 x i256> @test_sext1() {
; X32-LABEL: test_sext1:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $-1, 60(%eax)
; X32-NEXT: movl $-1, 56(%eax)
; X32-NEXT: movl $-1, 52(%eax)
; X32-NEXT: movl $-1, 48(%eax)
; X32-NEXT: movl $-1, 44(%eax)
; X32-NEXT: movl $-1, 40(%eax)
; X32-NEXT: movl $-1, 36(%eax)
; X32-NEXT: movl $-99, 32(%eax)
; X32-NEXT: movl $0, 28(%eax)
; X32-NEXT: movl $0, 24(%eax)
; X32-NEXT: movl $0, 20(%eax)
; X32-NEXT: movl $0, 16(%eax)
; X32-NEXT: movl $0, 12(%eax)
; X32-NEXT: movl $0, 8(%eax)
; X32-NEXT: movl $0, 4(%eax)
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: retl $4
;
; X64-LABEL: test_sext1:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: movq $-1, 56(%rdi)
; X64-NEXT: movq $-1, 48(%rdi)
; X64-NEXT: movq $-1, 40(%rdi)
; X64-NEXT: movq $-99, 32(%rdi)
; X64-NEXT: retq
%Se = sext <2 x i8> <i8 -100, i8 -99> to <2 x i256>
%Shuff = shufflevector <2 x i256> zeroinitializer, <2 x i256> %Se, <2 x i32> <i32 1, i32 3>
ret <2 x i256> %Shuff
}
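; test_sext2: the same shuffle, but extending from i128. The low 32-bit chunk
; of sext(i128 -1999) is 0xFFFFF831, hence the $-1999 store at offset 32 with
; $-1 in offsets 36..60.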
define <2 x i256> @test_sext2() {
; X32-LABEL: test_sext2:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $-1, 60(%eax)
; X32-NEXT: movl $-1, 56(%eax)
; X32-NEXT: movl $-1, 52(%eax)
; X32-NEXT: movl $-1, 48(%eax)
; X32-NEXT: movl $-1, 44(%eax)
; X32-NEXT: movl $-1, 40(%eax)
; X32-NEXT: movl $-1, 36(%eax)
; X32-NEXT: movl $-1999, 32(%eax) # imm = 0xF831
; X32-NEXT: movl $0, 28(%eax)
; X32-NEXT: movl $0, 24(%eax)
; X32-NEXT: movl $0, 20(%eax)
; X32-NEXT: movl $0, 16(%eax)
; X32-NEXT: movl $0, 12(%eax)
; X32-NEXT: movl $0, 8(%eax)
; X32-NEXT: movl $0, 4(%eax)
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: retl $4
;
; X64-LABEL: test_sext2:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: movq $-1, 56(%rdi)
; X64-NEXT: movq $-1, 48(%rdi)
; X64-NEXT: movq $-1, 40(%rdi)
; X64-NEXT: movq $-1999, 32(%rdi) # imm = 0xF831
; X64-NEXT: retq
%Se = sext <2 x i128> <i128 -2000, i128 -1999> to <2 x i256>
%Shuff = shufflevector <2 x i256> zeroinitializer, <2 x i256> %Se, <2 x i32> <i32 1, i32 3>
ret <2 x i256> %Shuff
}
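; test_zext1: zero extension instead. zext(i8 -2) is 254 with all higher bits
; clear, so element 1 is just $254 at offset 32 and zero everywhere else.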
define <2 x i256> @test_zext1() {
; X32-LABEL: test_zext1:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
; X32-NEXT: movl $0, 52(%eax)
; X32-NEXT: movl $0, 48(%eax)
; X32-NEXT: movl $0, 44(%eax)
; X32-NEXT: movl $0, 40(%eax)
; X32-NEXT: movl $0, 36(%eax)
; X32-NEXT: movl $254, 32(%eax)
; X32-NEXT: movl $0, 28(%eax)
; X32-NEXT: movl $0, 24(%eax)
; X32-NEXT: movl $0, 20(%eax)
; X32-NEXT: movl $0, 16(%eax)
; X32-NEXT: movl $0, 12(%eax)
; X32-NEXT: movl $0, 8(%eax)
; X32-NEXT: movl $0, 4(%eax)
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: retl $4
;
; X64-LABEL: test_zext1:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: movq $0, 40(%rdi)
; X64-NEXT: movq $254, 32(%rdi)
; X64-NEXT: retq
%Se = zext <2 x i8> <i8 -1, i8 -2> to <2 x i256>
%Shuff = shufflevector <2 x i256> zeroinitializer, <2 x i256> %Se, <2 x i32> <i32 1, i32 3>
ret <2 x i256> %Shuff
}
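; test_zext2: zext(i128 -2) keeps the low 128 bits (all-ones except bit 0) and
; clears the high 128 bits, so offsets 32..47 hold $-2 followed by $-1 chunks
; while offsets 48..63 stay zero.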
define <2 x i256> @test_zext2() {
; X32-LABEL: test_zext2:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, 60(%eax)
; X32-NEXT: movl $0, 56(%eax)
; X32-NEXT: movl $0, 52(%eax)
; X32-NEXT: movl $0, 48(%eax)
; X32-NEXT: movl $-1, 44(%eax)
; X32-NEXT: movl $-1, 40(%eax)
; X32-NEXT: movl $-1, 36(%eax)
; X32-NEXT: movl $-2, 32(%eax)
; X32-NEXT: movl $0, 28(%eax)
; X32-NEXT: movl $0, 24(%eax)
; X32-NEXT: movl $0, 20(%eax)
; X32-NEXT: movl $0, 16(%eax)
; X32-NEXT: movl $0, 12(%eax)
; X32-NEXT: movl $0, 8(%eax)
; X32-NEXT: movl $0, 4(%eax)
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: retl $4
;
; X64-LABEL: test_zext2:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorps %xmm0, %xmm0
; X64-NEXT: movaps %xmm0, 48(%rdi)
; X64-NEXT: movaps %xmm0, 16(%rdi)
; X64-NEXT: movaps %xmm0, (%rdi)
; X64-NEXT: movq $-1, 40(%rdi)
; X64-NEXT: movq $-2, 32(%rdi)
; X64-NEXT: retq
%Se = zext <2 x i128> <i128 -1, i128 -2> to <2 x i256>
%Shuff = shufflevector <2 x i256> zeroinitializer, <2 x i256> %Se, <2 x i32> <i32 1, i32 3>
ret <2 x i256> %Shuff
}