
X86 integer register class naming changes: make them consistent with the FP and vector classes.

llvm-svn: 28324
Evan Cheng 2006-05-16 07:21:53 +00:00
parent d4a056116c
commit dc9b5f5fc0
10 changed files with 853 additions and 853 deletions
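In short, the commit renames the general-purpose integer register classes throughout the backend. The mapping below is distilled from the diffs that follow; it is an illustrative C++ summary, not code from the commit:

#include <map>
#include <string>

// Old class name -> new class name, as applied across the ten changed files.
static const std::map<std::string, std::string> RenamedClasses = {
    {"R8", "GR8"},     {"R16", "GR16"},   {"R32", "GR32"},
    {"R16_", "GR16_"}, {"R32_", "GR32_"},
};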


@@ -360,14 +360,14 @@ void X86ATTAsmPrinter::printMachineInstruction(const MachineInstr *MI) {
 // See if a truncate instruction can be turned into a nop.
 switch (MI->getOpcode()) {
 default: break;
-case X86::TRUNC_R32_R16:
-case X86::TRUNC_R32_R8:
-case X86::TRUNC_R16_R8: {
+case X86::TRUNC_GR32_GR16:
+case X86::TRUNC_GR32_GR8:
+case X86::TRUNC_GR16_GR8: {
 const MachineOperand &MO0 = MI->getOperand(0);
 const MachineOperand &MO1 = MI->getOperand(1);
 unsigned Reg0 = MO0.getReg();
 unsigned Reg1 = MO1.getReg();
-if (MI->getOpcode() == X86::TRUNC_R32_R16)
+if (MI->getOpcode() == X86::TRUNC_GR32_GR16)
 Reg1 = getX86SubSuperRegister(Reg1, MVT::i16);
 else
 Reg1 = getX86SubSuperRegister(Reg1, MVT::i8);
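For readers skimming the hunk above: the asm printers treat an integer truncate as a potential nop because a GR16/GR8 value already lives inside its GR32/GR16 super-register. A minimal self-contained sketch of that idea, with a hypothetical lookup standing in for getX86SubSuperRegister (not the LLVM API):

#include <map>
#include <string>

// Hypothetical 16-bit sub-register table, covering a few 32-bit registers.
static std::string sub16(const std::string &Reg32) {
  static const std::map<std::string, std::string> Sub = {
      {"EAX", "AX"}, {"ECX", "CX"}, {"EDX", "DX"}, {"EBX", "BX"}};
  auto It = Sub.find(Reg32);
  return It == Sub.end() ? std::string() : It->second;
}

// TRUNC_GR32_GR16 dst, src needs no machine instruction when dst is
// exactly the low 16 bits of src -- the printer can emit nothing.
static bool truncIsNop(const std::string &Dst16, const std::string &Src32) {
  return !Dst16.empty() && Dst16 == sub16(Src32);
}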


@@ -393,9 +393,9 @@ void Emitter::emitInstruction(const MachineInstr &MI) {
 assert(0 && "pseudo instructions should be removed before code emission");
 case X86::IMPLICIT_USE:
 case X86::IMPLICIT_DEF:
-case X86::IMPLICIT_DEF_R8:
-case X86::IMPLICIT_DEF_R16:
-case X86::IMPLICIT_DEF_R32:
+case X86::IMPLICIT_DEF_GR8:
+case X86::IMPLICIT_DEF_GR16:
+case X86::IMPLICIT_DEF_GR32:
 case X86::IMPLICIT_DEF_FR32:
 case X86::IMPLICIT_DEF_FR64:
 case X86::IMPLICIT_DEF_VR64:


@@ -509,7 +509,7 @@ SDOperand X86DAGToDAGISel::getGlobalBaseReg() {
 SSARegMap *RegMap = BB->getParent()->getSSARegMap();
 // FIXME: when we get to LP64, we will need to create the appropriate
 // type of register here.
-GlobalBaseReg = RegMap->createVirtualRegister(X86::R32RegisterClass);
+GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
 BuildMI(FirstMBB, MBBI, X86::MovePCtoStack, 0);
 BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg);
 }
@@ -801,12 +801,12 @@ void X86DAGToDAGISel::Select(SDOperand &Result, SDOperand N) {
 case MVT::i16:
 Opc = X86::MOV16to16_;
 VT = MVT::i16;
-Opc2 = X86::TRUNC_R16_R8;
+Opc2 = X86::TRUNC_GR16_GR8;
 break;
 case MVT::i32:
 Opc = X86::MOV32to32_;
 VT = MVT::i32;
-Opc2 = X86::TRUNC_R32_R8;
+Opc2 = X86::TRUNC_GR32_GR8;
 break;
 }
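The second hunk pairs a copy into one of the GR16_/GR32_ subclasses (registers that actually have an 8-bit sub-register) with the matching TRUNC pseudo. A hedged stand-alone sketch of that selection, with invented enum and struct names:

#include <cassert>

enum class SrcVT { i16, i32 };

// Invented holder for the two opcodes Select picks together.
struct OpcodePair {
  const char *CopyToSubclass; // MOV into a GR16_/GR32_ register
  const char *TruncPseudo;    // matching TRUNC_*_GR8 pseudo
};

static OpcodePair truncToI8Opcodes(SrcVT VT) {
  switch (VT) {
  case SrcVT::i16: return {"MOV16to16_", "TRUNC_GR16_GR8"};
  case SrcVT::i32: return {"MOV32to32_", "TRUNC_GR32_GR8"};
  }
  assert(false && "unhandled value type");
  return {nullptr, nullptr};
}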


@@ -67,9 +67,9 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
 addLegalAddressScale(3);
 // Set up the register classes.
-addRegisterClass(MVT::i8, X86::R8RegisterClass);
-addRegisterClass(MVT::i16, X86::R16RegisterClass);
-addRegisterClass(MVT::i32, X86::R32RegisterClass);
+addRegisterClass(MVT::i8, X86::GR8RegisterClass);
+addRegisterClass(MVT::i16, X86::GR16RegisterClass);
+addRegisterClass(MVT::i32, X86::GR32RegisterClass);
 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
 // operation.
@@ -940,33 +940,33 @@ X86TargetLowering::PreprocessFastCCArguments(std::vector<SDOperand>Args,
 case MVT::i1:
 case MVT::i8:
 Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
-X86::R8RegisterClass);
+X86::GR8RegisterClass);
 Loc.first.Kind = FALocInfo::LiveInRegLoc;
 Loc.first.Loc = Reg;
 Loc.first.Typ = MVT::i8;
 break;
 case MVT::i16:
 Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
-X86::R16RegisterClass);
+X86::GR16RegisterClass);
 Loc.first.Kind = FALocInfo::LiveInRegLoc;
 Loc.first.Loc = Reg;
 Loc.first.Typ = MVT::i16;
 break;
 case MVT::i32:
 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
-X86::R32RegisterClass);
+X86::GR32RegisterClass);
 Loc.first.Kind = FALocInfo::LiveInRegLoc;
 Loc.first.Loc = Reg;
 Loc.first.Typ = MVT::i32;
 break;
 case MVT::i64:
 Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
-X86::R32RegisterClass);
+X86::GR32RegisterClass);
 Loc.first.Kind = FALocInfo::LiveInRegLoc;
 Loc.first.Loc = Reg;
 Loc.first.Typ = MVT::i32;
 if (ObjIntRegs == 2) {
-Reg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
+Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
 Loc.second.Kind = FALocInfo::LiveInRegLoc;
 Loc.second.Loc = Reg;
 Loc.second.Typ = MVT::i32;
@@ -1563,7 +1563,7 @@ X86TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
 // Load the old value of the high byte of the control word...
 unsigned OldCW =
-F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
+F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
 addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
 // Set the high part to be round to zero...
@@ -2558,7 +2558,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
 }
 }
-// Take advantage of the fact R32 to VR128 scalar_to_vector (i.e. movd)
+// Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
 // clears the upper bits.
 // FIXME: we can do the same for v4f32 case when we know both parts of
 // the lower half come from scalar_to_vector (loadf32). We should do
@@ -2899,7 +2899,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
 SDOperand
 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
-// Transform it so it matches pinsrw which expects a 16-bit value in a R32
+// Transform it so it matches pinsrw which expects a 16-bit value in a GR32
 // as its second argument.
 MVT::ValueType VT = Op.getValueType();
 MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
@@ -2930,7 +2930,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
 Idx <<= 1;
 if (MVT::isFloatingPoint(N1.getValueType())) {
 if (N1.getOpcode() == ISD::LOAD) {
-// Just load directly from f32mem to R32.
+// Just load directly from f32mem to GR32.
 N1 = DAG.getLoad(MVT::i32, N1.getOperand(0), N1.getOperand(1),
 N1.getOperand(2));
 } else {
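One fact these lowering hunks lean on (see the LowerBUILD_VECTOR comment above): movd from a GR32 into an XMM register zero-fills the three upper lanes. A tiny illustrative model of that behavior, not LLVM code:

#include <array>
#include <cstdint>

// Model of `movd xmm, r32`: lane 0 gets the scalar, lanes 1-3 are cleared,
// which is why scalar_to_vector from a GR32 needs no explicit zeroing.
static std::array<uint32_t, 4> movdModel(uint32_t Scalar) {
  return {Scalar, 0u, 0u, 0u};
}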

File diff suppressed because it is too large.


@@ -36,7 +36,7 @@ def : Pat<(v4i16 (undef)), (IMPLICIT_DEF_VR64)>, Requires<[HasMMX]>;
 def : Pat<(v2i32 (undef)), (IMPLICIT_DEF_VR64)>, Requires<[HasMMX]>;
 // Move Instructions
-def MOVD64rr : I<0x6E, MRMSrcReg, (ops VR64:$dst, R32:$src),
+def MOVD64rr : I<0x6E, MRMSrcReg, (ops VR64:$dst, GR32:$src),
 "movd {$src, $dst|$dst, $src}", []>, TB,
 Requires<[HasMMX]>;
 def MOVD64rm : I<0x6E, MRMSrcMem, (ops VR64:$dst, i32mem:$src),


@@ -488,33 +488,33 @@ def Int_MINSDrm : SD_Intrm<0x5D, "minsd {$src2, $dst|$dst, $src2}",
 }
 // Conversion instructions
-def CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops R32:$dst, FR32:$src),
+def CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops GR32:$dst, FR32:$src),
 "cvttss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (fp_to_sint FR32:$src))]>;
-def CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
+[(set GR32:$dst, (fp_to_sint FR32:$src))]>;
+def CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
 "cvttss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
-def CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops R32:$dst, FR64:$src),
+[(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
+def CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops GR32:$dst, FR64:$src),
 "cvttsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (fp_to_sint FR64:$src))]>;
-def CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops R32:$dst, f64mem:$src),
+[(set GR32:$dst, (fp_to_sint FR64:$src))]>;
+def CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops GR32:$dst, f64mem:$src),
 "cvttsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
+[(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
 def CVTSD2SSrr: SDI<0x5A, MRMSrcReg, (ops FR32:$dst, FR64:$src),
 "cvtsd2ss {$src, $dst|$dst, $src}",
 [(set FR32:$dst, (fround FR64:$src))]>;
 def CVTSD2SSrm: SDI<0x5A, MRMSrcMem, (ops FR32:$dst, f64mem:$src),
 "cvtsd2ss {$src, $dst|$dst, $src}",
 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
-def CVTSI2SSrr: SSI<0x2A, MRMSrcReg, (ops FR32:$dst, R32:$src),
+def CVTSI2SSrr: SSI<0x2A, MRMSrcReg, (ops FR32:$dst, GR32:$src),
 "cvtsi2ss {$src, $dst|$dst, $src}",
-[(set FR32:$dst, (sint_to_fp R32:$src))]>;
+[(set FR32:$dst, (sint_to_fp GR32:$src))]>;
 def CVTSI2SSrm: SSI<0x2A, MRMSrcMem, (ops FR32:$dst, i32mem:$src),
 "cvtsi2ss {$src, $dst|$dst, $src}",
 [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
-def CVTSI2SDrr: SDI<0x2A, MRMSrcReg, (ops FR64:$dst, R32:$src),
+def CVTSI2SDrr: SDI<0x2A, MRMSrcReg, (ops FR64:$dst, GR32:$src),
 "cvtsi2sd {$src, $dst|$dst, $src}",
-[(set FR64:$dst, (sint_to_fp R32:$src))]>;
+[(set FR64:$dst, (sint_to_fp GR32:$src))]>;
 def CVTSI2SDrm: SDI<0x2A, MRMSrcMem, (ops FR64:$dst, i32mem:$src),
 "cvtsi2sd {$src, $dst|$dst, $src}",
 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
@@ -530,43 +530,43 @@ def CVTSS2SDrm: I<0x5A, MRMSrcMem, (ops FR64:$dst, f32mem:$src),
 Requires<[HasSSE2]>;
 // Match intrinsics which expect XMM operand(s).
-def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "cvtss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
-def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (ops R32:$dst, f32mem:$src),
+[(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
+def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
 "cvtss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse_cvtss2si
+[(set GR32:$dst, (int_x86_sse_cvtss2si
 (loadv4f32 addr:$src)))]>;
-def CVTSD2SIrr: SDI<0x2D, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def CVTSD2SIrr: SDI<0x2D, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "cvtsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
-def CVTSD2SIrm: SDI<0x2D, MRMSrcMem, (ops R32:$dst, f128mem:$src),
+[(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
+def CVTSD2SIrm: SDI<0x2D, MRMSrcMem, (ops GR32:$dst, f128mem:$src),
 "cvtsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_cvtsd2si
+[(set GR32:$dst, (int_x86_sse2_cvtsd2si
 (loadv2f64 addr:$src)))]>;
 // Aliases for intrinsics
-def Int_CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def Int_CVTTSS2SIrr: SSI<0x2C, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "cvttss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse_cvttss2si VR128:$src))]>;
-def Int_CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops R32:$dst, f32mem:$src),
+[(set GR32:$dst, (int_x86_sse_cvttss2si VR128:$src))]>;
+def Int_CVTTSS2SIrm: SSI<0x2C, MRMSrcMem, (ops GR32:$dst, f32mem:$src),
 "cvttss2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse_cvttss2si
+[(set GR32:$dst, (int_x86_sse_cvttss2si
 (loadv4f32 addr:$src)))]>;
-def Int_CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def Int_CVTTSD2SIrr: SDI<0x2C, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "cvttsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_cvttsd2si VR128:$src))]>;
-def Int_CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops R32:$dst, f128mem:$src),
+[(set GR32:$dst, (int_x86_sse2_cvttsd2si VR128:$src))]>;
+def Int_CVTTSD2SIrm: SDI<0x2C, MRMSrcMem, (ops GR32:$dst, f128mem:$src),
 "cvttsd2si {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_cvttsd2si
+[(set GR32:$dst, (int_x86_sse2_cvttsd2si
 (loadv2f64 addr:$src)))]>;
 let isTwoAddress = 1 in {
 def Int_CVTSI2SSrr: SSI<0x2A, MRMSrcReg,
-(ops VR128:$dst, VR128:$src1, R32:$src2),
+(ops VR128:$dst, VR128:$src1, GR32:$src2),
 "cvtsi2ss {$src2, $dst|$dst, $src2}",
 [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
-R32:$src2))]>;
+GR32:$src2))]>;
 def Int_CVTSI2SSrm: SSI<0x2A, MRMSrcMem,
 (ops VR128:$dst, VR128:$src1, i32mem:$src2),
 "cvtsi2ss {$src2, $dst|$dst, $src2}",
@@ -960,10 +960,10 @@ def CVTPD2PSrm : PDI<0x5A, MRMSrcReg, (ops VR128:$dst, f128mem:$src),
 // Aliases for intrinsics
 let isTwoAddress = 1 in {
 def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
-(ops VR128:$dst, VR128:$src1, R32:$src2),
+(ops VR128:$dst, VR128:$src1, GR32:$src2),
 "cvtsi2sd {$src2, $dst|$dst, $src2}",
 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
-R32:$src2))]>;
+GR32:$src2))]>;
 def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
 (ops VR128:$dst, VR128:$src1, i32mem:$src2),
 "cvtsi2sd {$src2, $dst|$dst, $src2}",
@@ -2003,16 +2003,16 @@ def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
 // Extract / Insert
 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
-(ops R32:$dst, VR128:$src1, i32i8imm:$src2),
+(ops GR32:$dst, VR128:$src1, i32i8imm:$src2),
 "pextrw {$src2, $src1, $dst|$dst, $src1, $src2}",
-[(set R32:$dst, (X86pextrw (v8i16 VR128:$src1),
+[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
 (i32 imm:$src2)))]>;
 let isTwoAddress = 1 in {
 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
-(ops VR128:$dst, VR128:$src1, R32:$src2, i32i8imm:$src3),
+(ops VR128:$dst, VR128:$src1, GR32:$src2, i32i8imm:$src3),
 "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}",
 [(set VR128:$dst, (v8i16 (X86pinsrw (v8i16 VR128:$src1),
-R32:$src2, (i32 imm:$src3))))]>;
+GR32:$src2, (i32 imm:$src3))))]>;
 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
 (ops VR128:$dst, VR128:$src1, i16mem:$src2, i32i8imm:$src3),
 "pinsrw {$src3, $src2, $dst|$dst, $src2, $src3}",
@@ -2027,16 +2027,16 @@ def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
 //===----------------------------------------------------------------------===//
 // Mask creation
-def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "movmskps {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
-def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (ops R32:$dst, VR128:$src),
+[(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
+def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "movmskpd {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
+[(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
-def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (ops R32:$dst, VR128:$src),
+def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (ops GR32:$dst, VR128:$src),
 "pmovmskb {$src, $dst|$dst, $src}",
-[(set R32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
+[(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
 // Conditional store
 def MASKMOVDQU : PDI<0xF7, RawFrm, (ops VR128:$src, VR128:$mask),
@@ -2064,9 +2064,9 @@ def MOVNTPDmr : PDI<0x2B, MRMDestMem, (ops i128mem:$dst, VR128:$src),
 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (ops f128mem:$dst, VR128:$src),
 "movntdq {$src, $dst|$dst, $src}",
 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
-def MOVNTImr : I<0xC3, MRMDestMem, (ops i32mem:$dst, R32:$src),
+def MOVNTImr : I<0xC3, MRMDestMem, (ops i32mem:$dst, GR32:$src),
 "movnti {$src, $dst|$dst, $src}",
-[(int_x86_sse2_movnt_i addr:$dst, R32:$src)]>,
+[(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
 TB, Requires<[HasSSE2]>;
 // Flush cache
@@ -2136,10 +2136,10 @@ def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
 [(set VR128:$dst,
 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
-def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, R32:$src),
+def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR32:$src),
 "movd {$src, $dst|$dst, $src}",
 [(set VR128:$dst,
-(v4i32 (scalar_to_vector R32:$src)))]>;
+(v4i32 (scalar_to_vector GR32:$src)))]>;
 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
 "movd {$src, $dst|$dst, $src}",
 [(set VR128:$dst,
@@ -2176,9 +2176,9 @@ def MOVPD2SDmr : SDI<0x11, MRMDestMem, (ops f64mem:$dst, VR128:$src),
 "movsd {$src, $dst|$dst, $src}",
 [(store (f64 (vector_extract (v2f64 VR128:$src),
 (i32 0))), addr:$dst)]>;
-def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (ops R32:$dst, VR128:$src),
+def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (ops GR32:$dst, VR128:$src),
 "movd {$src, $dst|$dst, $src}",
-[(set R32:$dst, (vector_extract (v4i32 VR128:$src),
+[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
 (i32 0)))]>;
 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (ops i32mem:$dst, VR128:$src),
 "movd {$src, $dst|$dst, $src}",
@@ -2226,10 +2226,10 @@ def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
 (v2f64 (scalar_to_vector (loadf64 addr:$src))),
 MOVL_shuffle_mask)))]>;
 // movd / movq to XMM register zero-extends
-def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, R32:$src),
+def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (ops VR128:$dst, GR32:$src),
 "movd {$src, $dst|$dst, $src}",
 [(set VR128:$dst, (v4i32 (vector_shuffle immAllZerosV,
-(v4i32 (scalar_to_vector R32:$src)),
+(v4i32 (scalar_to_vector GR32:$src)),
 MOVL_shuffle_mask)))]>;
 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (ops VR128:$dst, i32mem:$src),
 "movd {$src, $dst|$dst, $src}",
@@ -2279,11 +2279,11 @@ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
 (MOVDQAmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
-// Scalar to v8i16 / v16i8. The source may be a R32, but only the lower 8 or
+// Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower 8 or
 // 16-bits matter.
-def : Pat<(v8i16 (X86s2vec R32:$src)), (v8i16 (MOVDI2PDIrr R32:$src))>,
+def : Pat<(v8i16 (X86s2vec GR32:$src)), (v8i16 (MOVDI2PDIrr GR32:$src))>,
 Requires<[HasSSE2]>;
-def : Pat<(v16i8 (X86s2vec R32:$src)), (v16i8 (MOVDI2PDIrr R32:$src))>,
+def : Pat<(v16i8 (X86s2vec GR32:$src)), (v16i8 (MOVDI2PDIrr GR32:$src))>,
 Requires<[HasSSE2]>;
 // bit_convert
@@ -2352,11 +2352,11 @@ def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>,
 // movd to XMM register zero-extends
 let AddedComplexity = 20 in {
 def : Pat<(v8i16 (vector_shuffle immAllZerosV,
-(v8i16 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
-(v8i16 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+(v8i16 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
+(v8i16 (MOVZDI2PDIrr GR32:$src))>, Requires<[HasSSE2]>;
 def : Pat<(v16i8 (vector_shuffle immAllZerosV,
-(v16i8 (X86s2vec R32:$src)), MOVL_shuffle_mask)),
-(v16i8 (MOVZDI2PDIrr R32:$src))>, Requires<[HasSSE2]>;
+(v16i8 (X86s2vec GR32:$src)), MOVL_shuffle_mask)),
+(v16i8 (MOVZDI2PDIrr GR32:$src))>, Requires<[HasSSE2]>;
 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
 def : Pat<(v2f64 (vector_shuffle immAllZerosV,
 (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
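Several of the defs above give the SSE mask-extraction instructions a GR32 destination. A compact, self-contained model of what movmskps computes (illustrative only, not LLVM code):

#include <array>
#include <cstdint>

// Model of `movmskps r32, xmm`: gather the sign bit of each of the four
// f32 lanes into the low four bits of a 32-bit GPR.
static uint32_t movmskpsModel(const std::array<uint32_t, 4> &LaneBits) {
  uint32_t Mask = 0;
  for (int I = 0; I < 4; ++I)
    Mask |= ((LaneBits[I] >> 31) & 1u) << I;
  return Mask;
}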


@@ -261,14 +261,14 @@ void X86IntelAsmPrinter::printMachineInstruction(const MachineInstr *MI) {
 // See if a truncate instruction can be turned into a nop.
 switch (MI->getOpcode()) {
 default: break;
-case X86::TRUNC_R32_R16:
-case X86::TRUNC_R32_R8:
-case X86::TRUNC_R16_R8: {
+case X86::TRUNC_GR32_GR16:
+case X86::TRUNC_GR32_GR8:
+case X86::TRUNC_GR16_GR8: {
 const MachineOperand &MO0 = MI->getOperand(0);
 const MachineOperand &MO1 = MI->getOperand(1);
 unsigned Reg0 = MO0.getReg();
 unsigned Reg1 = MO1.getReg();
-if (MI->getOpcode() == X86::TRUNC_R32_R16)
+if (MI->getOpcode() == X86::TRUNC_GR32_GR16)
 Reg1 = getX86SubSuperRegister(Reg1, MVT::i16);
 else
 Reg1 = getX86SubSuperRegister(Reg1, MVT::i8);


@@ -50,15 +50,15 @@ void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
 unsigned SrcReg, int FrameIdx,
 const TargetRegisterClass *RC) const {
 unsigned Opc;
-if (RC == &X86::R32RegClass) {
+if (RC == &X86::GR32RegClass) {
 Opc = X86::MOV32mr;
-} else if (RC == &X86::R16RegClass) {
+} else if (RC == &X86::GR16RegClass) {
 Opc = X86::MOV16mr;
-} else if (RC == &X86::R8RegClass) {
+} else if (RC == &X86::GR8RegClass) {
 Opc = X86::MOV8mr;
-} else if (RC == &X86::R32_RegClass) {
+} else if (RC == &X86::GR32_RegClass) {
 Opc = X86::MOV32_mr;
-} else if (RC == &X86::R16_RegClass) {
+} else if (RC == &X86::GR16_RegClass) {
 Opc = X86::MOV16_mr;
 } else if (RC == &X86::RFPRegClass || RC == &X86::RSTRegClass) {
 Opc = X86::FpST64m;
@@ -80,15 +80,15 @@ void X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
 unsigned DestReg, int FrameIdx,
 const TargetRegisterClass *RC) const{
 unsigned Opc;
-if (RC == &X86::R32RegClass) {
+if (RC == &X86::GR32RegClass) {
 Opc = X86::MOV32rm;
-} else if (RC == &X86::R16RegClass) {
+} else if (RC == &X86::GR16RegClass) {
 Opc = X86::MOV16rm;
-} else if (RC == &X86::R8RegClass) {
+} else if (RC == &X86::GR8RegClass) {
 Opc = X86::MOV8rm;
-} else if (RC == &X86::R32_RegClass) {
+} else if (RC == &X86::GR32_RegClass) {
 Opc = X86::MOV32_rm;
-} else if (RC == &X86::R16_RegClass) {
+} else if (RC == &X86::GR16_RegClass) {
 Opc = X86::MOV16_rm;
 } else if (RC == &X86::RFPRegClass || RC == &X86::RSTRegClass) {
 Opc = X86::FpLD64m;
@@ -110,15 +110,15 @@ void X86RegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
 unsigned DestReg, unsigned SrcReg,
 const TargetRegisterClass *RC) const {
 unsigned Opc;
-if (RC == &X86::R32RegClass) {
+if (RC == &X86::GR32RegClass) {
 Opc = X86::MOV32rr;
-} else if (RC == &X86::R16RegClass) {
+} else if (RC == &X86::GR16RegClass) {
 Opc = X86::MOV16rr;
-} else if (RC == &X86::R8RegClass) {
+} else if (RC == &X86::GR8RegClass) {
 Opc = X86::MOV8rr;
-} else if (RC == &X86::R32_RegClass) {
+} else if (RC == &X86::GR32_RegClass) {
 Opc = X86::MOV32_rr;
-} else if (RC == &X86::R16_RegClass) {
+} else if (RC == &X86::GR16_RegClass) {
 Opc = X86::MOV16_rr;
 } else if (RC == &X86::RFPRegClass || RC == &X86::RSTRegClass) {
 Opc = X86::FpMOV;
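The three hunks above are the same class-to-opcode dispatch repeated for store, load, and copy. A self-contained sketch of the pattern with stand-in enums (names mirror the diff, but these are not the LLVM types):

#include <cstdio>

// Stand-ins for the renamed register classes and the MOV opcodes the
// dispatch selects.
enum class RegClass { GR8, GR16, GR32, GR16_, GR32_ };
enum class Opcode { MOV8mr, MOV16mr, MOV32mr, MOV16_mr, MOV32_mr };

// Mirrors storeRegToStackSlot's if/else chain: each class spills with
// the MOV of its own width (the GR16_/GR32_ subclasses use the _ forms).
static Opcode spillOpcodeFor(RegClass RC) {
  switch (RC) {
  case RegClass::GR32:  return Opcode::MOV32mr;
  case RegClass::GR16:  return Opcode::MOV16mr;
  case RegClass::GR8:   return Opcode::MOV8mr;
  case RegClass::GR32_: return Opcode::MOV32_mr;
  case RegClass::GR16_: return Opcode::MOV16_mr;
  }
  return Opcode::MOV32mr; // unreachable with a valid class
}

int main() {
  std::printf("GR16 spill opcode id: %d\n",
              static_cast<int>(spillOpcodeFor(RegClass::GR16)));
}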


@@ -103,15 +103,15 @@ let Namespace = "X86" in {
 // dependences between upper and lower parts of the register. BL and BH are
 // last because they are call clobbered. Both Athlon and P4 chips suffer this
 // issue.
-def R8 : RegisterClass<"X86", [i8], 8, [AL, CL, DL, AH, CH, DH, BL, BH]>;
+def GR8 : RegisterClass<"X86", [i8], 8, [AL, CL, DL, AH, CH, DH, BL, BH]>;
-def R16 : RegisterClass<"X86", [i16], 16, [AX, CX, DX, SI, DI, BX, BP, SP]> {
+def GR16 : RegisterClass<"X86", [i16], 16, [AX, CX, DX, SI, DI, BX, BP, SP]> {
 let MethodProtos = [{
 iterator allocation_order_end(MachineFunction &MF) const;
 }];
 let MethodBodies = [{
-R16Class::iterator
-R16Class::allocation_order_end(MachineFunction &MF) const {
+GR16Class::iterator
+GR16Class::allocation_order_end(MachineFunction &MF) const {
 if (hasFP(MF)) // Does the function dedicate EBP to being a frame ptr?
 return end()-2; // If so, don't allocate SP or BP
 else
@@ -120,14 +120,14 @@ def R16 : RegisterClass<"X86", [i16], 16, [AX, CX, DX, SI, DI, BX, BP, SP]> {
 }];
 }
-def R32 : RegisterClass<"X86", [i32], 32,
-[EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP]> {
+def GR32 : RegisterClass<"X86", [i32], 32,
+[EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP]> {
 let MethodProtos = [{
 iterator allocation_order_end(MachineFunction &MF) const;
 }];
 let MethodBodies = [{
-R32Class::iterator
-R32Class::allocation_order_end(MachineFunction &MF) const {
+GR32Class::iterator
+GR32Class::allocation_order_end(MachineFunction &MF) const {
 if (hasFP(MF)) // Does the function dedicate EBP to being a frame ptr?
 return end()-2; // If so, don't allocate ESP or EBP
 else
@@ -136,9 +136,9 @@ def R32 : RegisterClass<"X86", [i32], 32,
 }];
 }
-// R16, R32 subclasses which contain registers that have R8 sub-registers.
-def R16_ : RegisterClass<"X86", [i16], 16, [AX, CX, DX, BX]>;
-def R32_ : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX, EBX]>;
+// GR16, GR32 subclasses which contain registers that have R8 sub-registers.
+def GR16_ : RegisterClass<"X86", [i16], 16, [AX, CX, DX, BX]>;
+def GR32_ : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX, EBX]>;
 // Scalar SSE2 floating point registers.
 def FR32 : RegisterClass<"X86", [f32], 32,
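The allocation_order_end bodies above shrink the allocatable tail of GR16/GR32 when the function keeps a frame pointer. A hedged stand-alone sketch of that policy; the else branch is elided in the hunk, so this assumes it still withholds only the stack pointer:

#include <string>
#include <vector>

// GR32's allocation order as listed in the def above; ESP and EBP sit
// last precisely so the end of the list can be trimmed.
static std::vector<std::string> allocatableGR32(bool HasFramePointer) {
  std::vector<std::string> Order = {"EAX", "ECX", "EDX", "ESI",
                                    "EDI", "EBX", "EBP", "ESP"};
  // With a frame pointer: drop ESP and EBP (mirrors `return end()-2`).
  // Without: assume only ESP is withheld (the else branch is not shown).
  Order.resize(Order.size() - (HasFramePointer ? 2 : 1));
  return Order;
}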