21418c85a1
If the mask input to getV4X86ShuffleImm8 only refers to a single source element (plus undefs), then canonicalize it to a full broadcast. getV4X86ShuffleImm8 defaults to in-place (identity) values for undef lanes, which can be useful for shuffle widening/narrowing but does leave SimplifyDemanded* calls thinking the shuffle depends on unnecessary elements. I'm still investigating what we should do more generally to avoid these undemanded elements, but the broadcast case was a simpler win.
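For context: these PSHUFW/PSHUFD-style immediates pack four 2-bit source indices into one byte, so a mask that reads a single source element (with the remaining lanes undef) can instead be encoded as a broadcast of that element. Below is a minimal standalone C++ sketch of that idea; the helper name and types are illustrative, not LLVM's actual getV4X86ShuffleImm8 implementation.

#include <array>
#include <cstdint>

// Illustrative stand-in for the imm8 encoding: each of the four lanes gets a
// 2-bit source index; -1 marks an undef lane. If every defined lane reads the
// same source element, fill the undef lanes with that element too, producing
// a full broadcast immediate instead of identity-filled undef lanes.
uint8_t encodeV4ShuffleImm8(const std::array<int, 4> &Mask) {
  int OnlyElt = -1; // the single source element seen so far, or -1
  bool Broadcast = true;
  for (int M : Mask) {
    if (M < 0)
      continue; // undef lane, ignore
    if (OnlyElt < 0)
      OnlyElt = M;
    else if (M != OnlyElt)
      Broadcast = false; // mask uses more than one source element
  }
  uint8_t Imm = 0;
  for (int I = 0; I != 4; ++I) {
    int M = Mask[I];
    if (M < 0) // undef lane: broadcast the single used element if we can,
      M = (Broadcast && OnlyElt >= 0) ? OnlyElt : I; // else identity index
    Imm |= static_cast<uint8_t>(M & 0x3) << (I * 2);
  }
  return Imm;
}

// Example: mask <2, -1, -1, -1> now encodes as 0xAA = [2,2,2,2] rather than
// the identity fill 0xE6 = [2,1,2,3], so SimplifyDemanded* sees that only
// element 2 is demanded.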
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
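
; Check that the <1 x i64> load folds into pshufw (imm 238 = [2,3,2,3]) and
; that element 0 of the result is extracted directly with movd.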
define i32 @test0(<1 x i64>* %v4) nounwind {
; X32-LABEL: test0:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufw $238, (%eax), %mm0 # mm0 = mem[2,3,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    addl $32, %eax
; X32-NEXT:    retl
;
; X64-LABEL: test0:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    addl $32, %eax
; X64-NEXT:    retq
entry:
  %v5 = load <1 x i64>, <1 x i64>* %v4, align 8
  %v12 = bitcast <1 x i64> %v5 to <4 x i16>
  %v13 = bitcast <4 x i16> %v12 to x86_mmx
  %v14 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %v13, i8 -18)
  %v15 = bitcast x86_mmx %v14 to <4 x i16>
  %v16 = bitcast <4 x i16> %v15 to <1 x i64>
  %v17 = extractelement <1 x i64> %v16, i32 0
  %v18 = bitcast i64 %v17 to <2 x i32>
  %v19 = extractelement <2 x i32> %v18, i32 0
  %v20 = add i32 %v19, 32
  ret i32 %v20
}
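
; Check that the scalar load is inserted with movd and shuffled in place
; (imm 232 = [0,2,2,3]) before the low element is read back.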
define i32 @test1(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test1:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd (%eax), %mm0
; X32-NEXT:    pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    emms
; X32-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd (%rdi), %mm0
; X64-NEXT:    pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %0 = load i32, i32* %ptr, align 4
  %1 = insertelement <2 x i32> undef, i32 %0, i32 0
  %2 = insertelement <2 x i32> %1, i32 0, i32 1
  %3 = bitcast <2 x i32> %2 to x86_mmx
  %4 = bitcast x86_mmx %3 to i64
  %5 = bitcast i64 %4 to <4 x i16>
  %6 = bitcast <4 x i16> %5 to x86_mmx
  %7 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %6, i8 -24)
  %8 = bitcast x86_mmx %7 to <4 x i16>
  %9 = bitcast <4 x i16> %8 to <1 x i64>
  %10 = extractelement <1 x i64> %9, i32 0
  %11 = bitcast i64 %10 to <2 x i32>
  %12 = extractelement <2 x i32> %11, i32 0
  tail call void @llvm.x86.mmx.emms()
  ret i32 %12
}
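
; Check that an x86_mmx load folds directly into pshufw.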
define i32 @test2(i32* nocapture readonly %ptr) nounwind {
; X32-LABEL: test2:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    emms
; X32-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pshufw $232, (%rdi), %mm0 # mm0 = mem[0,2,2,3]
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    emms
; X64-NEXT:    retq
entry:
  %0 = bitcast i32* %ptr to x86_mmx*
  %1 = load x86_mmx, x86_mmx* %0, align 8
  %2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -24)
  %3 = bitcast x86_mmx %2 to <4 x i16>
  %4 = bitcast <4 x i16> %3 to <1 x i64>
  %5 = extractelement <1 x i64> %4, i32 0
  %6 = bitcast i64 %5 to <2 x i32>
  %7 = extractelement <2 x i32> %6, i32 0
  tail call void @llvm.x86.mmx.emms()
  ret i32 %7
}
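
; Extracting the lower lane of an MMX value should need only a movd.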
define i32 @test3(x86_mmx %a) nounwind {
; X32-LABEL: test3:
; X32:       # %bb.0:
; X32-NEXT:    movd %mm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: test3:
; X64:       # %bb.0:
; X64-NEXT:    movd %mm0, %eax
; X64-NEXT:    retq
  %tmp0 = bitcast x86_mmx %a to <2 x i32>
  %tmp1 = extractelement <2 x i32> %tmp0, i32 0
  ret i32 %tmp1
}

; Verify we don't muck with extractelts from the upper lane.
define i32 @test4(x86_mmx %a) nounwind {
; X32-LABEL: test4:
; X32:       # %bb.0:
; X32-NEXT:    movq2dq %mm0, %xmm0
; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X32-NEXT:    movd %xmm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: test4:
; X64:       # %bb.0:
; X64-NEXT:    movq2dq %mm0, %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    movd %xmm0, %eax
; X64-NEXT:    retq
  %tmp0 = bitcast x86_mmx %a to <2 x i32>
  %tmp1 = extractelement <2 x i32> %tmp0, i32 1
  ret i32 %tmp1
}
declare x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx, i8)
declare void @llvm.x86.mmx.emms()