diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 89ca374880c..b89b2b6e720 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -27463,19 +27463,13 @@ static bool isCMOVPseudo(MachineInstr &MI) { case X86::CMOV_RFP32: case X86::CMOV_RFP64: case X86::CMOV_RFP80: - case X86::CMOV_V2F64: - case X86::CMOV_V2I64: - case X86::CMOV_V4F32: - case X86::CMOV_V4F64: - case X86::CMOV_V4I64: - case X86::CMOV_V16F32: - case X86::CMOV_V8F32: - case X86::CMOV_V8F64: - case X86::CMOV_V8I64: - case X86::CMOV_V8I1: - case X86::CMOV_V16I1: - case X86::CMOV_V32I1: - case X86::CMOV_V64I1: + case X86::CMOV_VR128: + case X86::CMOV_VR256: + case X86::CMOV_VR512: + case X86::CMOV_VK8: + case X86::CMOV_VK16: + case X86::CMOV_VK32: + case X86::CMOV_VK64: return true; default: @@ -29059,26 +29053,19 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, return EmitLoweredTLSCall(MI, BB); case X86::CMOV_FR32: case X86::CMOV_FR64: - case X86::CMOV_F128: case X86::CMOV_GR8: case X86::CMOV_GR16: case X86::CMOV_GR32: case X86::CMOV_RFP32: case X86::CMOV_RFP64: case X86::CMOV_RFP80: - case X86::CMOV_V2F64: - case X86::CMOV_V2I64: - case X86::CMOV_V4F32: - case X86::CMOV_V4F64: - case X86::CMOV_V4I64: - case X86::CMOV_V16F32: - case X86::CMOV_V8F32: - case X86::CMOV_V8F64: - case X86::CMOV_V8I64: - case X86::CMOV_V8I1: - case X86::CMOV_V16I1: - case X86::CMOV_V32I1: - case X86::CMOV_V64I1: + case X86::CMOV_VR128: + case X86::CMOV_VR256: + case X86::CMOV_VR512: + case X86::CMOV_VK8: + case X86::CMOV_VK16: + case X86::CMOV_VK32: + case X86::CMOV_VK64: return EmitLoweredSelect(MI, BB); case X86::RDFLAGS32: diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index ee3470a832b..5b55da781e0 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -590,22 +590,30 @@ let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in { defm _FR32 
: CMOVrr_PSEUDO<FR32, f32>; defm _FR64 : CMOVrr_PSEUDO<FR64, f64>; - defm _F128 : CMOVrr_PSEUDO<VR128, f128>; - defm _V4F32 : CMOVrr_PSEUDO<VR128, v4f32>; - defm _V2F64 : CMOVrr_PSEUDO<VR128, v2f64>; - defm _V2I64 : CMOVrr_PSEUDO<VR128, v2i64>; - defm _V8F32 : CMOVrr_PSEUDO<VR256, v8f32>; - defm _V4F64 : CMOVrr_PSEUDO<VR256, v4f64>; - defm _V4I64 : CMOVrr_PSEUDO<VR256, v4i64>; - defm _V8I64 : CMOVrr_PSEUDO<VR512, v8i64>; - defm _V8F64 : CMOVrr_PSEUDO<VR512, v8f64>; - defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>; - defm _V8I1 : CMOVrr_PSEUDO<VK8, v8i1>; - defm _V16I1 : CMOVrr_PSEUDO<VK16, v16i1>; - defm _V32I1 : CMOVrr_PSEUDO<VK32, v32i1>; - defm _V64I1 : CMOVrr_PSEUDO<VK64, v64i1>; + defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>; + defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>; + defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>; + defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>; + defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>; + defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>; + defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>; } // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] +def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)), + (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>; +def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)), + (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>; +def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)), + (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>; +def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)), + (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>; +def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)), + (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>; +def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)), + (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>; +def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)), + (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>; + //===----------------------------------------------------------------------===// // Normal-Instructions-With-Lock-Prefix Pseudo Instructions //===----------------------------------------------------------------------===//