Commute x86 cmove instructions by swapping the operands and changing the condition
to its inverse. Testing this as llcbeta.

llvm-svn: 42661
commit 9af50ee6ef
parent e0e36e4a0e
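The rewrite relies on the identity that selecting between two values is unchanged when the two operands are swapped and the condition is inverted. A minimal sketch of that identity in plain C++, modelling the two-address "A = CMOVcc A, B" form as a select (the helper name cmov is illustrative only and not part of the patch):

    #include <cassert>

    // Model of a two-address conditional move: the destination keeps its old
    // value (src1) unless the condition cc holds, in which case it takes src2.
    static int cmov(bool cc, int src1, int src2) { return cc ? src2 : src1; }

    int main() {
      const int a = 7, b = 42;
      for (int i = 0; i < 2; ++i) {
        const bool cc = (i != 0);
        // Commuting the operands requires inverting the condition:
        //   CMOVcc a, b  ==  CMOV!cc b, a
        assert(cmov(cc, a, b) == cmov(!cc, b, a));
      }
      return 0;
    }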
@@ -22,8 +22,15 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

namespace {
  cl::opt<bool>
  EnableCommuteCMove("enable-x86-commute-cmove",
                     cl::desc("Commute conditional moves by inverting conditions"));
}

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfo(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
@@ -366,7 +373,6 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  // FIXME: Can commute cmoves by changing the condition!
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
@@ -394,6 +400,100 @@ MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
    if (!EnableCommuteCMove)
      return 0;
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setInstrDescriptor(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfo::commuteInstruction(MI);
  }
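Note on the mechanism: each CMOV case above only rewrites the instruction's descriptor to the opcode with the inverted condition (MI->setInstrDescriptor(get(Opc))) and then falls through to the default case, where TargetInstrInfo::commuteInstruction(MI) performs the actual swap of the two source operands. A minimal sketch of the condition pairing encoded by that inner switch, written in plain C++ against an illustrative enum rather than the real X86 opcode names (Cond and invert are made-up names for this example):

    #include <cassert>

    // Illustrative condition codes; the patch switches on concrete opcodes
    // such as X86::CMOVB32rr instead of a separate condition enum.
    enum Cond { B, AE, E, NE, BE, A, L, GE, LE, G, S, NS, P, NP };

    // Inverse-condition pairing mirrored from the switch above:
    // B<->AE, E<->NE, BE<->A, L<->GE, LE<->G, S<->NS, P<->NP.
    static Cond invert(Cond c) {
      switch (c) {
      case B:  return AE;  case AE: return B;
      case E:  return NE;  case NE: return E;
      case BE: return A;   case A:  return BE;
      case L:  return GE;  case GE: return L;
      case LE: return G;   case G:  return LE;
      case S:  return NS;  case NS: return S;
      case P:  return NP;  case NP: return P;
      }
      return c; // not reached for the enumerators above
    }

    int main() {
      // Inversion is an involution: applying it twice returns the original.
      for (int c = B; c <= NP; ++c)
        assert(invert(invert(static_cast<Cond>(c))) == static_cast<Cond>(c));
      return 0;
    }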
@@ -647,30 +647,19 @@ let isTwoAddress = 1 in {

// Conditional moves
let Uses = [EFLAGS] in {
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_B, EFLAGS))]>,
                  TB;

def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
@@ -678,324 +667,327 @@ def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovae\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_AE, EFLAGS))]>,
                  TB;

def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_E, EFLAGS))]>,
                  TB;

def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>,
                  TB;

def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_BE, EFLAGS))]>,
                  TB, OpSize;
def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_BE, EFLAGS))]>,
                  TB, OpSize;
def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_BE, EFLAGS))]>,
                  TB;
def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_BE, EFLAGS))]>,
                  TB;

def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_A, EFLAGS))]>,
                  TB;

def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_L, EFLAGS))]>,
                  TB;

def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>,
                  TB;

def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>,
                  TB;

def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_G, EFLAGS))]>,
                  TB;

def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_S, EFLAGS))]>,
                  TB;

def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>,
                  TB;

def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_P, EFLAGS))]>,
                  TB;

def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                  "cmovnp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                  "cmovnp\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
                                    X86_COND_NP, EFLAGS))]>,
                  TB;
} // isCommutable = 1

def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovnp\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB;

def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_B, EFLAGS))]>,
                  TB, OpSize;
def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovb\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_B, EFLAGS))]>,
                  TB;
def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovae\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_AE, EFLAGS))]>,
                  TB, OpSize;
def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovae\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_AE, EFLAGS))]>,
                  TB;
def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_E, EFLAGS))]>,
                  TB, OpSize;
def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmove\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_E, EFLAGS))]>,
                  TB;
def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>,
                  TB, OpSize;
def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovne\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NE, EFLAGS))]>,
                  TB;
def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_BE, EFLAGS))]>,
                  TB, OpSize;
def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovbe\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_BE, EFLAGS))]>,
                  TB;
def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_A, EFLAGS))]>,
                  TB, OpSize;
def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmova\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_A, EFLAGS))]>,
                  TB;
def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_L, EFLAGS))]>,
                  TB, OpSize;
def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovl\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_L, EFLAGS))]>,
                  TB;
def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>,
                  TB, OpSize;
def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovge\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_GE, EFLAGS))]>,
                  TB;
def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>,
                  TB, OpSize;
def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovle\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_LE, EFLAGS))]>,
                  TB;
def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_G, EFLAGS))]>,
                  TB, OpSize;
def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovg\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_G, EFLAGS))]>,
                  TB;
def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_S, EFLAGS))]>,
                  TB, OpSize;
def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovs\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_S, EFLAGS))]>,
                  TB;
def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>,
                  TB, OpSize;
def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovns\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_NS, EFLAGS))]>,
                  TB;
def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_P, EFLAGS))]>,
                  TB, OpSize;
def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
                  "cmovp\t{$src2, $dst|$dst, $src2}",
                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    X86_COND_P, EFLAGS))]>,
                  TB;
def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
                  "cmovnp\t{$src2, $dst|$dst, $src2}",
                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    X86_COND_NP, EFLAGS))]>,
                  TB, OpSize;
} // Uses = [EFLAGS]

@@ -778,141 +778,144 @@ def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),

// Conditional moves
let Uses = [EFLAGS], isTwoAddress = 1 in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "cmovnp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
                                     X86_COND_NP, EFLAGS))]>, TB;
} // isCommutable = 1

def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovb\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_B, EFLAGS))]>, TB;
def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovae\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_AE, EFLAGS))]>, TB;
def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmove\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_E, EFLAGS))]>, TB;
def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovne\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NE, EFLAGS))]>, TB;
def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovbe\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_BE, EFLAGS))]>, TB;
def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmova\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_A, EFLAGS))]>, TB;
def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovl\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_L, EFLAGS))]>, TB;
def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovge\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_GE, EFLAGS))]>, TB;
def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovle\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_LE, EFLAGS))]>, TB;
def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovg\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_G, EFLAGS))]>, TB;
def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovs\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_S, EFLAGS))]>, TB;
def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovns\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NS, EFLAGS))]>, TB;
def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_P, EFLAGS))]>, TB;
def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
                   "cmovnp\t{$src2, $dst|$dst, $src2}",
                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     X86_COND_NP, EFLAGS))]>, TB;