llvm-mirror/test/CodeGen/X86/sse2-vector-shifts.ll
Lang Hames (df2443e32e)

X86 vector element shift-by-immediate instructions take i8 immediates. Make
the instruction definitions and ISEL reflect this.

Prior to this patch these instructions took an i32i8imm, and the high bits were
dropped during encoding. This led to incorrect behavior for shifts by
immediates higher than 255. This patch fixes that issue by detecting large
immediate shifts and returning constant zero (for logical shifts) or capping
the shift amount at an encodable value (for arithmetic shifts).

Fixes <rdar://problem/14968098>

llvm-svn: 193096
2013-10-21 17:51:24 +00:00
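
The IR tests below only exercise shift amounts up to the element width. As a
sketch of the over-255 situation the commit message describes, the
shift-by-immediate intrinsics (whose count operand is an i32) can carry such an
amount directly; per the fix, a logical shift like the following should now
lower to an all-zero vector instead of having its immediate truncated during
encoding. This is a hypothetical extra case, not part of the checked-in test,
and the function name is made up for illustration:

define <8 x i16> @example_sllw_by_256(<8 x i16> %InVec) {
entry:
  ; Per the commit message, a logical shift by an amount above 255 must not
  ; wrap around to "shift by 0"; it should produce a zero vector.
  %shl = tail call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %InVec, i32 256)
  ret <8 x i16> %shl
}

declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)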

; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=+sse2 -mcpu=corei7 | FileCheck %s

; SSE2 Logical Shift Left
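; Each element type (word, dword, qword) is covered with three shift amounts:
; 0 (kept as a plain psllw/pslld/psllq with a zero immediate), 1
; (strength-reduced to adding the vector to itself: paddw/paddd/paddq), and
; the element width itself, which folds to an all-zero vector (materialised
; here with xorps).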

define <8 x i16> @test_sllw_1(<8 x i16> %InVec) {
entry:
  %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sllw_1:
; CHECK: psllw $0, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_sllw_2(<8 x i16> %InVec) {
entry:
  %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sllw_2:
; CHECK: paddw %xmm0, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_sllw_3(<8 x i16> %InVec) {
entry:
  %shl = shl <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sllw_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_slld_1(<4 x i32> %InVec) {
entry:
  %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_slld_1:
; CHECK: pslld $0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_slld_2(<4 x i32> %InVec) {
entry:
  %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_slld_2:
; CHECK: paddd %xmm0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_slld_3(<4 x i32> %InVec) {
entry:
  %shl = shl <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_slld_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_sllq_1(<2 x i64> %InVec) {
entry:
  %shl = shl <2 x i64> %InVec, <i64 0, i64 0>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_sllq_1:
; CHECK: psllq $0, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_sllq_2(<2 x i64> %InVec) {
entry:
  %shl = shl <2 x i64> %InVec, <i64 1, i64 1>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_sllq_2:
; CHECK: paddq %xmm0, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_sllq_3(<2 x i64> %InVec) {
entry:
  %shl = shl <2 x i64> %InVec, <i64 64, i64 64>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_sllq_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret

; SSE2 Arithmetic Shift
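; For arithmetic shifts the amount is clamped rather than folded to zero: a
; shift by the full element width becomes a shift by width-1 (psraw $15,
; psrad $31), which still yields the sign-replicated result and matches the
; capping behaviour described in the commit message above. SSE2 has no packed
; 64-bit arithmetic shift, so there is no qword group here.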

define <8 x i16> @test_sraw_1(<8 x i16> %InVec) {
entry:
  %shl = ashr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sraw_1:
; CHECK: psraw $0, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_sraw_2(<8 x i16> %InVec) {
entry:
  %shl = ashr <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sraw_2:
; CHECK: psraw $1, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_sraw_3(<8 x i16> %InVec) {
entry:
  %shl = ashr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_sraw_3:
; CHECK: psraw $15, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srad_1(<4 x i32> %InVec) {
entry:
  %shl = ashr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srad_1:
; CHECK: psrad $0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srad_2(<4 x i32> %InVec) {
entry:
  %shl = ashr <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srad_2:
; CHECK: psrad $1, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srad_3(<4 x i32> %InVec) {
entry:
  %shl = ashr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srad_3:
; CHECK: psrad $31, %xmm0
; CHECK-NEXT: ret

; SSE2 Logical Shift Right
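; Logical right shifts mirror the left-shift cases: shifts by 0 and 1 are
; emitted directly as psrlw/psrld/psrlq with the immediate, and a shift by
; the full element width again folds to an all-zero vector.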

define <8 x i16> @test_srlw_1(<8 x i16> %InVec) {
entry:
  %shl = lshr <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_srlw_1:
; CHECK: psrlw $0, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_srlw_2(<8 x i16> %InVec) {
entry:
  %shl = lshr <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_srlw_2:
; CHECK: psrlw $1, %xmm0
; CHECK-NEXT: ret

define <8 x i16> @test_srlw_3(<8 x i16> %InVec) {
entry:
  %shl = lshr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
  ret <8 x i16> %shl
}

; CHECK-LABEL: test_srlw_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srld_1(<4 x i32> %InVec) {
entry:
  %shl = lshr <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srld_1:
; CHECK: psrld $0, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srld_2(<4 x i32> %InVec) {
entry:
  %shl = lshr <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srld_2:
; CHECK: psrld $1, %xmm0
; CHECK-NEXT: ret

define <4 x i32> @test_srld_3(<4 x i32> %InVec) {
entry:
  %shl = lshr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
  ret <4 x i32> %shl
}

; CHECK-LABEL: test_srld_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_srlq_1(<2 x i64> %InVec) {
entry:
  %shl = lshr <2 x i64> %InVec, <i64 0, i64 0>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_srlq_1:
; CHECK: psrlq $0, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_srlq_2(<2 x i64> %InVec) {
entry:
  %shl = lshr <2 x i64> %InVec, <i64 1, i64 1>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_srlq_2:
; CHECK: psrlq $1, %xmm0
; CHECK-NEXT: ret

define <2 x i64> @test_srlq_3(<2 x i64> %InVec) {
entry:
  %shl = lshr <2 x i64> %InVec, <i64 64, i64 64>
  ret <2 x i64> %shl
}

; CHECK-LABEL: test_srlq_3:
; CHECK: xorps %xmm0, %xmm0
; CHECK-NEXT: ret