If extload is legal, the transform (zext (select c, load1, load2)) -> (select c, zextload1, zextload2) can save one ext instruction. Differential Revision: https://reviews.llvm.org/D95086
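
A rough sketch of the combine (notation assumed, matching the comments used in the test below):

  (zext (select c, (load p), (load q)))  ->  (select c, (zextload p), (zextload q))

The tests below check the resulting x86-64 lowering for both zext and sext, in scalar and vector forms.
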
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s

; (zext(select c, load1, load2)) -> (select c, zextload1, zextload2)
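; Both i8 loads are emitted as zero-extending movzbl loads and the i64 result
; is picked with cmoveq; no separate extend of the selected value remains.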
define i64 @zext_scalar(i8* %p, i1 zeroext %c) {
; CHECK-LABEL: zext_scalar:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzbl (%rdi), %ecx
; CHECK-NEXT:    movzbl 1(%rdi), %eax
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    retq
  %ld1 = load volatile i8, i8* %p
  %arrayidx1 = getelementptr inbounds i8, i8* %p, i64 1
  %ld2 = load volatile i8, i8* %arrayidx1
  %cond.v = select i1 %c, i8 %ld2, i8 %ld1
  %cond = zext i8 %cond.v to i64
  ret i64 %cond
}

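; Mixed load widths: the i8 and i16 loads are still folded into zero-extending
; movzbl/movzwl loads and the select becomes a single cmov.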
define i64 @zext_scalar2(i8* %p, i16* %q, i1 zeroext %c) {
; CHECK-LABEL: zext_scalar2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzbl (%rdi), %ecx
; CHECK-NEXT:    movzwl (%rsi), %eax
; CHECK-NEXT:    testl %edx, %edx
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    retq
  %ld1 = load volatile i8, i8* %p
  %ext_ld1 = zext i8 %ld1 to i16
  %ld2 = load volatile i16, i16* %q
  %cond.v = select i1 %c, i16 %ld2, i16 %ext_ld1
  %cond = zext i16 %cond.v to i64
  ret i64 %cond
}

; Don't fold the ext if there is a load with conflicting ext type.
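; Because %ld1 is sign-extended to i16 while the outer extension is a zext, the
; i8 load stays a sign-extending movsbl and a trailing movzwl still zero-extends
; the selected i16 value.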
define i64 @zext_scalar_neg(i8* %p, i16* %q, i1 zeroext %c) {
; CHECK-LABEL: zext_scalar_neg:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movsbl (%rdi), %eax
; CHECK-NEXT:    testl %edx, %edx
; CHECK-NEXT:    je .LBB2_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    movzwl (%rsi), %eax
; CHECK-NEXT:  .LBB2_2:
; CHECK-NEXT:    movzwl %ax, %eax
; CHECK-NEXT:    retq
  %ld1 = load volatile i8, i8* %p
  %ext_ld1 = sext i8 %ld1 to i16
  %ld2 = load volatile i16, i16* %q
  %cond.v = select i1 %c, i16 %ld2, i16 %ext_ld1
  %cond = zext i16 %cond.v to i64
  ret i64 %cond
}

; (sext(select c, load1, load2)) -> (select c, sextload1, sextload2)
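; Both i8 loads become sign-extending movsbq loads selected by a cmov, with no
; separate sign-extend of the selected value.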
define i64 @sext_scalar(i8* %p, i1 zeroext %c) {
; CHECK-LABEL: sext_scalar:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movsbq (%rdi), %rcx
; CHECK-NEXT:    movsbq 1(%rdi), %rax
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    cmoveq %rcx, %rax
; CHECK-NEXT:    retq
  %ld1 = load volatile i8, i8* %p
  %arrayidx1 = getelementptr inbounds i8, i8* %p, i64 1
  %ld2 = load volatile i8, i8* %arrayidx1
  %cond.v = select i1 %c, i8 %ld2, i8 %ld1
  %cond = sext i8 %cond.v to i64
  ret i64 %cond
}

; Same as zext_scalar, but operating on vectors.
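; Both <2 x i32> loads are folded into zero-extending pmovzxdq loads; the scalar
; i1 condition lowers to a branch around a register move.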
define <2 x i64> @zext_vector_i1(<2 x i32>* %p, i1 zeroext %c) {
; CHECK-LABEL: zext_vector_i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    jne .LBB4_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    movdqa %xmm1, %xmm0
; CHECK-NEXT:  .LBB4_2:
; CHECK-NEXT:    retq
  %ld1 = load volatile <2 x i32>, <2 x i32>* %p
  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i64 1
  %ld2 = load volatile <2 x i32>, <2 x i32>* %arrayidx1
  %cond.v = select i1 %c, <2 x i32> %ld2, <2 x i32> %ld1
  %cond = zext <2 x i32> %cond.v to <2 x i64>
  ret <2 x i64> %cond
}

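; With a <2 x i1> condition the loads are still folded into zero-extending
; pmovzxdq loads; the per-element select is done with blendvpd after psllq
; shifts the mask bits into the sign-bit position.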
define <2 x i64> @zext_vector_v2i1(<2 x i32>* %p, <2 x i1> %c) {
; CHECK-LABEL: zext_vector_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    psllq $63, %xmm0
; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
; CHECK-NEXT:    movapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %ld1 = load volatile <2 x i32>, <2 x i32>* %p
  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i64 1
  %ld2 = load volatile <2 x i32>, <2 x i32>* %arrayidx1
  %cond.v = select <2 x i1> %c, <2 x i32> %ld2, <2 x i32> %ld1
  %cond = zext <2 x i32> %cond.v to <2 x i64>
  ret <2 x i64> %cond
}

; Same as sext_scalar, but operating on vectors.
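; Both <2 x i32> loads fold into sign-extending pmovsxdq loads; the scalar i1
; condition again lowers to a branch around a register move.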
define <2 x i64> @sext_vector_i1(<2 x i32>* %p, i1 zeroext %c) {
; CHECK-LABEL: sext_vector_i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    pmovsxdq (%rdi), %xmm1
; CHECK-NEXT:    pmovsxdq 8(%rdi), %xmm0
; CHECK-NEXT:    testl %esi, %esi
; CHECK-NEXT:    jne .LBB6_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    movdqa %xmm1, %xmm0
; CHECK-NEXT:  .LBB6_2:
; CHECK-NEXT:    retq
  %ld1 = load volatile <2 x i32>, <2 x i32>* %p
  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i64 1
  %ld2 = load volatile <2 x i32>, <2 x i32>* %arrayidx1
  %cond.v = select i1 %c, <2 x i32> %ld2, <2 x i32> %ld1
  %cond = sext <2 x i32> %cond.v to <2 x i64>
  ret <2 x i64> %cond
}

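; Sign-extending pmovsxdq loads, selected per element with blendvpd on the
; shifted <2 x i1> mask.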
define <2 x i64> @sext_vector_v2i1(<2 x i32>* %p, <2 x i1> %c) {
; CHECK-LABEL: sext_vector_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    psllq $63, %xmm0
; CHECK-NEXT:    pmovsxdq (%rdi), %xmm1
; CHECK-NEXT:    pmovsxdq 8(%rdi), %xmm2
; CHECK-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
; CHECK-NEXT:    movapd %xmm1, %xmm0
; CHECK-NEXT:    retq
  %ld1 = load volatile <2 x i32>, <2 x i32>* %p
  %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i64 1
  %ld2 = load volatile <2 x i32>, <2 x i32>* %arrayidx1
  %cond.v = select <2 x i1> %c, <2 x i32> %ld2, <2 x i32> %ld1
  %cond = sext <2 x i32> %cond.v to <2 x i64>
  ret <2 x i64> %cond
}