
[x86] enable machine combiner reassociations for 128-bit vector min/max

llvm-svn: 245715
Sanjay Patel 2015-08-21 18:06:49 +00:00
parent 25f1840e3c
commit 2e11124ece
2 changed files with 104 additions and 0 deletions
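Marking these packed opcodes as associative and commutative lets the MachineCombiner re-pair a serial chain min(x3, min(x2, x0 + x1)) into min(x0 + x1, min(x2, x3)), so the second min no longer waits on the add. A minimal IR-level sketch of the reassociated shape (illustrative only: the function name is made up, the real transform runs on MachineInstrs after instruction selection, and the equivalence relies on the relaxed NaN/signed-zero semantics that the MINC*/MAXC* opcodes encode):

; Input form (see the test below):   s2 = min(x3, min(x2, x0 + x1))
; Reassociated equivalent:           s2 = min(x0 + x1, min(x2, x3))
define <4 x float> @reassociated_sketch(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
  %t0 = fadd <4 x float> %x0, %x1                              ; x0 + x1
  %c1 = fcmp olt <4 x float> %x2, %x3
  %m = select <4 x i1> %c1, <4 x float> %x2, <4 x float> %x3   ; min(x2, x3), independent of the fadd
  %c2 = fcmp olt <4 x float> %m, %t0
  %s2 = select <4 x i1> %c2, <4 x float> %m, <4 x float> %t0   ; min(x0 + x1, min(x2, x3))
  ret <4 x float> %s2
}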


@@ -6394,12 +6394,20 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
case X86::MAXCPDrr:
case X86::MAXCPSrr:
case X86::MAXCSDrr:
case X86::MAXCSSrr:
case X86::MINCPDrr:
case X86::MINCPSrr:
case X86::MINCSDrr:
case X86::MINCSSrr:
case X86::VMAXCPDrr:
case X86::VMAXCPSrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
return true;
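
For context on the comment above: the plain compare-and-select min pattern is order-sensitive, which is why only the commutative 'C' opcode variants appear in this list. A small illustration of the asymmetry (my own example, not part of the patch):

; fcmp olt is false whenever an operand is NaN, so the select always falls
; through to its second operand:
;   min(NaN, 1.0):  olt(NaN, 1.0) = false  ->  returns 1.0
;   min(1.0, NaN):  olt(1.0, NaN) = false  ->  returns NaN
; Signed zeros behave similarly, since -0.0 and +0.0 compare equal.
define float @strict_min(float %a, float %b) {
  %c = fcmp olt float %a, %b
  %r = select i1 %c, float %a, float %b
  ret float %r
}
; As I understand it, the MINC*/MAXC* forms are only selected once the compiler
; has decided that either operand may be returned in those cases, so swapping
; and regrouping their operands is safe.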


@@ -454,3 +454,99 @@ define double @reassociate_maxs_double(double %x0, double %x1, double %x2, doubl
ret double %sel2
}

; Verify that SSE and AVX 128-bit vector single-precision minimum ops are reassociated.
define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_mins_v4f32:
; SSE: # BB#0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: minps %xmm3, %xmm2
; SSE-NEXT: minps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%t0 = fadd <4 x float> %x0, %x1
%cmp1 = fcmp olt <4 x float> %x2, %t0
%sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
%cmp2 = fcmp olt <4 x float> %x3, %sel1
%sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
ret <4 x float> %sel2
}

; Verify that SSE and AVX 128-bit vector single-precision maximum ops are reassociated.
define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
; SSE-LABEL: reassociate_maxs_v4f32:
; SSE: # BB#0:
; SSE-NEXT: addps %xmm1, %xmm0
; SSE-NEXT: maxps %xmm3, %xmm2
; SSE-NEXT: maxps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxps %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%t0 = fadd <4 x float> %x0, %x1
%cmp1 = fcmp ogt <4 x float> %x2, %t0
%sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
%cmp2 = fcmp ogt <4 x float> %x3, %sel1
%sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
ret <4 x float> %sel2
}

; Verify that SSE and AVX 128-bit vector double-precision minimum ops are reassociated.
define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_mins_v2f64:
; SSE: # BB#0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: minpd %xmm3, %xmm2
; SSE-NEXT: minpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_mins_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%t0 = fadd <2 x double> %x0, %x1
%cmp1 = fcmp olt <2 x double> %x2, %t0
%sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
%cmp2 = fcmp olt <2 x double> %x3, %sel1
%sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
ret <2 x double> %sel2
}

; Verify that SSE and AVX 128-bit vector double-precision maximum ops are reassociated.
define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
; SSE-LABEL: reassociate_maxs_v2f64:
; SSE: # BB#0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: maxpd %xmm3, %xmm2
; SSE-NEXT: maxpd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: reassociate_maxs_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmaxpd %xmm3, %xmm2, %xmm1
; AVX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%t0 = fadd <2 x double> %x0, %x1
%cmp1 = fcmp ogt <2 x double> %x2, %t0
%sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
%cmp2 = fcmp ogt <2 x double> %x3, %sel1
%sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
ret <2 x double> %sel2
}
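
The RUN lines that drive these SSE/AVX checks are not part of this hunk. Purely as a hypothetical sketch of how such a test is typically invoked (the flags are real llc/FileCheck options, but the actual RUN lines of this file may differ):

; Hypothetical RUN lines -- the real ones sit at the top of the test file and
; are not part of this diff:
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -enable-unsafe-fp-math | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx -enable-unsafe-fp-math | FileCheck %s --check-prefix=AVX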