llvm-mirror/test/CodeGen/X86/fast-isel-bitcast-crash.ll
Craig Topper a4615b0a89 [X86] Emit a reg-reg copy for fast isel of vector bitcasts.
Previously we just updated a map and moved on. But it's possible
we cached known bits information with the vreg, and that information
can be used by another basic block. If the other basic block has a
different view of the VT, these known bits won't make sense.

By emitting a copy we ensure we have different vregs before and
after the bitcast. This prevents the known bits from being used
with the wrong type.

Differential Revision: https://reviews.llvm.org/D82517
2020-06-24 20:15:21 -07:00
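
For readers following along in the FastISel code, the shape of the fix is
roughly the pattern sketched below. This is only an illustrative C++
fragment, not the actual diff from D82517: it assumes it sits inside an
X86FastISel-style selector, so the usual FastISel members (FuncInfo, TII,
DbgLoc, createResultReg, updateValueMap) are in scope, and SrcReg/DstRC
stand in for whatever the real handler computed for the bitcast operand
and its destination register class.

// Illustrative sketch only -- not the exact change from D82517.
// Assumes this runs inside a FastISel subclass (e.g. X86FastISel), so
// FuncInfo, TII, DbgLoc, createResultReg() and updateValueMap() are the
// usual FastISel members/helpers. SrcReg is the vreg already selected for
// the bitcast operand and DstRC is its destination register class.
bool selectVectorBitCast(const Instruction *I, Register SrcReg,
                         const TargetRegisterClass *DstRC) {
  // Old behavior: reuse the operand's vreg directly, so the bitcast result
  // and its input share a register (and any cached known bits).
  //   updateValueMap(I, SrcReg);

  // New behavior: give the bitcast its own vreg and connect the two with a
  // plain reg-reg COPY.
  Register ResultReg = createResultReg(DstRC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::COPY), ResultReg)
      .addReg(SrcReg);
  updateValueMap(I, ResultReg);
  return true;
}

Because the result now has its own vreg, known bits cached for SrcReg under
the old VT (v2i64 in the test below) are never consulted when another block
uses the result under its new VT (v8i16).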


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -O1 | FileCheck %s
; This used to crash due to the bitcast in the entry block reusing the vreg
; from its input. This resulted in known bits being calculated on the v2i64
; type. But the second basic block tried to use them with a v8i16 type. This
; was fixed by emitting a reg-reg copy for the bitcast so the vreg type will
; be seen the same in both basic blocks.
; We need the entry block to fall out of fast isel after selecting the bitcast.
; The shuffle vector guarantees that. The zext gives us a useful known bits
; value. We also need the second basic block to fall out of fast isel, which the
; intrinsic guarantees.
define <8 x i16> @bitcast_crash(i32 %arg, <8 x i16> %x, i1 %c) {
; CHECK-LABEL: bitcast_crash:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: movq %rax, %xmm1
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # %bb.1: # %bb1
; CHECK-NEXT: psraw %xmm1, %xmm0
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB0_2: # %bb2
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
bb:
%tmp = zext i32 %arg to i64
%tmp1 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%tmp2 = shufflevector <2 x i64> %tmp1, <2 x i64> undef, <2 x i32> zeroinitializer
%tmp5 = bitcast <2 x i64> %tmp2 to <8 x i16>
br i1 %c, label %bb1, label %bb2
bb1: ; preds = %bb
%tmp9 = call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %x, <8 x i16> %tmp5)
ret <8 x i16> %tmp9
bb2:
ret <8 x i16> %tmp5
}
declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>)