diff --git a/lib/Target/Alpha/AlphaRegisterInfo.td b/lib/Target/Alpha/AlphaRegisterInfo.td
index 35e6804ea6a..d644f05f91a 100644
--- a/lib/Target/Alpha/AlphaRegisterInfo.td
+++ b/lib/Target/Alpha/AlphaRegisterInfo.td
@@ -121,51 +121,18 @@ def GPRC : RegisterClass<"Alpha", [i64], 64,
     // Non-volatile
     R9, R10, R11, R12, R13, R14,
     // Don't allocate 15, 30, 31
-    R15, R30, R31 ]> //zero
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GPRCClass::iterator
-    GPRCClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;
-    }
-  }];
-}
+    R15, R30, R31 ]>; //zero
 
 def F4RC : RegisterClass<"Alpha", [f32], 64, [F0, F1, F10, F11, F12, F13, F14,
     F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29,
     F30,
     // Saved:
     F2, F3, F4, F5, F6, F7, F8, F9,
-    F31 ]> //zero
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    F4RCClass::iterator
-    F4RCClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-1;
-    }
-  }];
-}
+    F31 ]>; //zero
 
 def F8RC : RegisterClass<"Alpha", [f64], 64, [F0, F1, F10, F11, F12, F13, F14,
     F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29,
     F30,
     // Saved:
     F2, F3, F4, F5, F6, F7, F8, F9,
-    F31 ]> //zero
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    F8RCClass::iterator
-    F8RCClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-1;
-    }
-  }];
-}
+    F31 ]>; //zero
diff --git a/lib/Target/Blackfin/BlackfinRegisterInfo.td b/lib/Target/Blackfin/BlackfinRegisterInfo.td
index f5dd439a811..d8fd302b513 100644
--- a/lib/Target/Blackfin/BlackfinRegisterInfo.td
+++ b/lib/Target/Blackfin/BlackfinRegisterInfo.td
@@ -245,18 +245,6 @@ def D : RegisterClass<"BF", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {
 
 def P : RegisterClass<"BF", [i32], 32, [P0, P1, P2, P3, P4, P5, FP, SP]> {
   let SubRegClasses = [(P16L lo16), (P16H hi16)];
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    PClass::iterator
-    PClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      return allocation_order_begin(MF)
-        + (TFI->hasFP(MF) ? 7 : 6);
-    }
-  }];
 }
 
 def I : RegisterClass<"BF", [i32], 32, [I0, I1, I2, I3]>;
@@ -268,18 +256,6 @@ def DP : RegisterClass<"BF", [i32], 32,
                        [R0, R1, R2, R3, R4, R5, R6, R7,
                         P0, P1, P2, P3, P4, P5, FP, SP]> {
   let SubRegClasses = [(DP16L lo16), (DP16H hi16)];
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    DPClass::iterator
-    DPClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      return allocation_order_begin(MF)
-        + (TFI->hasFP(MF) ? 15 : 14);
-    }
-  }];
 }
 
 def GR : RegisterClass<"BF", [i32], 32,
@@ -287,20 +263,7 @@ def GR : RegisterClass<"BF", [i32], 32,
                        P0, P1, P2, P3, P4, P5,
                        I0, I1, I2, I3, M0, M1, M2, M3,
                        B0, B1, B2, B3, L0, L1, L2, L3,
-                       FP, SP]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GRClass::iterator
-    GRClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      return allocation_order_begin(MF)
-        + (TFI->hasFP(MF) ? 31 : 30);
-    }
-  }];
-}
+                       FP, SP]>;
 
 def ALL : RegisterClass<"BF", [i32], 32,
                         [R0, R1, R2, R3, R4, R5, R6, R7,
@@ -310,36 +273,10 @@ def ALL : RegisterClass<"BF", [i32], 32,
                         [R0, R1, R2, R3, R4, R5, R6, R7,
                         P0, P1, P2, P3, P4, P5,
                         I0, I1, I2, I3, M0, M1, M2, M3,
                         B0, B1, B2, B3, L0, L1, L2, L3,
                         FP, SP, A0X, A0W, A1X, A1W, ASTAT, RETS,
                         LC0, LT0, LB0, LC1, LT1, LB1, CYCLES, CYCLES2,
-                        USP, SEQSTAT, SYSCFG, RETI, RETX, RETN, RETE, EMUDAT]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    ALLClass::iterator
-    ALLClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      return allocation_order_begin(MF)
-        + (TFI->hasFP(MF) ? 31 : 30);
-    }
-  }];
-}
+                        USP, SEQSTAT, SYSCFG, RETI, RETX, RETN, RETE, EMUDAT]>;
 
 def PI : RegisterClass<"BF", [i32], 32,
-                       [P0, P1, P2, P3, P4, P5, I0, I1, I2, I3, FP, SP]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    PIClass::iterator
-    PIClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      return allocation_order_begin(MF)
-        + (TFI->hasFP(MF) ? 11 : 10);
-    }
-  }];
-}
+                       [P0, P1, P2, P3, P4, P5, I0, I1, I2, I3, FP, SP]>;
 
 // We are going to pretend that CC and !CC are 32-bit registers, even though
 // they only can hold 1 bit.
diff --git a/lib/Target/CellSPU/SPURegisterInfo.td b/lib/Target/CellSPU/SPURegisterInfo.td
index 3e8f0979256..cce0c823c93 100644
--- a/lib/Target/CellSPU/SPURegisterInfo.td
+++ b/lib/Target/CellSPU/SPURegisterInfo.td
@@ -170,23 +170,7 @@ def GPRC : RegisterClass<"SPU", [i128], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GPRCClass::iterator
-    GPRCClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    GPRCClass::iterator
-    GPRCClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as 64-bit wide (double word integer) "preferred slot":
 def R64C : RegisterClass<"SPU", [i64], 128,
@@ -204,23 +188,7 @@ def R64C : RegisterClass<"SPU", [i64], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R64CClass::iterator
-    R64CClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R64CClass::iterator
-    R64CClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as 64-bit wide (double word) FP "preferred slot":
 def R64FP : RegisterClass<"SPU", [f64], 128,
@@ -238,23 +206,7 @@ def R64FP : RegisterClass<"SPU", [f64], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R64FPClass::iterator
-    R64FPClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R64FPClass::iterator
-    R64FPClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as 32-bit wide (word) "preferred slot":
 def R32C : RegisterClass<"SPU", [i32], 128,
@@ -272,23 +224,7 @@ def R32C : RegisterClass<"SPU", [i32], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R32CClass::iterator
-    R32CClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R32CClass::iterator
-    R32CClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as single precision floating point "preferred slot":
 def R32FP : RegisterClass<"SPU", [f32], 128,
@@ -306,23 +242,7 @@ def R32FP : RegisterClass<"SPU", [f32], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R32FPClass::iterator
-    R32FPClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R32FPClass::iterator
-    R32FPClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as 16-bit wide (halfword) "preferred slot":
 def R16C : RegisterClass<"SPU", [i16], 128,
@@ -340,23 +260,7 @@ def R16C : RegisterClass<"SPU", [i16], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R16CClass::iterator
-    R16CClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R16CClass::iterator
-    R16CClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as 8-bit wide (byte) "preferred slot":
 def R8C : RegisterClass<"SPU", [i8], 128,
@@ -374,23 +278,7 @@ def R8C : RegisterClass<"SPU", [i8], 128,
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    R8CClass::iterator
-    R8CClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    R8CClass::iterator
-    R8CClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
 
 // The SPU's registers as vector registers:
 def VECREG : RegisterClass<"SPU",
@@ -410,20 +298,4 @@ def VECREG : RegisterClass<"SPU",
    R101, R100, R99, R98, R97, R96, R95, R94, R93, R92, R91, R90,
    R89, R88, R87, R86, R85, R84, R83, R82, R81, R80,
    /* environment ptr, SP, LR */
-   R2, R1, R0 ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    VECREGClass::iterator
-    VECREGClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    VECREGClass::iterator
-    VECREGClass::allocation_order_end(const MachineFunction &MF) const {
-      return end()-3;  // don't allocate R2, R1, or R0 (envp, sp, lr)
-    }
-  }];
-}
+   R2, R1, R0 ]>;
diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.td b/lib/Target/MBlaze/MBlazeRegisterInfo.td
index 4fb4d342ab2..bd396ed47b3 100644
--- a/lib/Target/MBlaze/MBlazeRegisterInfo.td
+++ b/lib/Target/MBlaze/MBlazeRegisterInfo.td
@@ -131,19 +131,7 @@ def GPR : RegisterClass<"MBlaze", [i32,f32], 32,
     R17, // Return address for exceptions
     R18, // Reserved for assembler
     R19  // The frame-pointer
-    ]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GPRClass::iterator
-    GPRClass::allocation_order_end(const MachineFunction &MF) const {
-      // The last 10 registers on the list above are reserved
-      return end()-10;
-    }
-  }];
-}
+    ]>;
 
 def SPR : RegisterClass<"MBlaze", [i32], 32,
   [
@@ -174,16 +162,8 @@ def SPR : RegisterClass<"MBlaze", [i32], 32,
     RPVR11
   ]>
 {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    SPRClass::iterator
-    SPRClass::allocation_order_end(const MachineFunction &MF) const {
-      // None of the special purpose registers are allocatable.
-      return end()-24;
-    }
-  }];
+  // None of the special purpose registers are allocatable.
+  let isAllocatable = 0;
 }
 
 def CRC : RegisterClass<"MBlaze", [i32], 32, [CARRY]> {
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 834c9af2639..53f4c2e4a88 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -76,7 +76,11 @@ BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
 
-  // Mark 4 special registers as reserved.
+  // Mark 4 special registers with subregisters as reserved.
+  Reserved.set(MSP430::PCB);
+  Reserved.set(MSP430::SPB);
+  Reserved.set(MSP430::SRB);
+  Reserved.set(MSP430::CGB);
   Reserved.set(MSP430::PCW);
   Reserved.set(MSP430::SPW);
   Reserved.set(MSP430::SRW);
diff --git a/lib/Target/MSP430/MSP430RegisterInfo.td b/lib/Target/MSP430/MSP430RegisterInfo.td
index ab7b59b4eaf..3ef6ab219de 100644
--- a/lib/Target/MSP430/MSP430RegisterInfo.td
+++ b/lib/Target/MSP430/MSP430RegisterInfo.td
@@ -70,25 +70,7 @@ def GR8 : RegisterClass<"MSP430", [i8], 8,
    // Frame pointer, sometimes allocable
    FPB,
    // Volatile, but not allocable
-   PCB, SPB, SRB, CGB]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GR8Class::iterator
-    GR8Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      // Depending on whether the function uses frame pointer or not, last 5 or 4
-      // registers on the list above are reserved
-      if (TFI->hasFP(MF))
-        return end()-5;
-      else
-        return end()-4;
-    }
-  }];
-}
+   PCB, SPB, SRB, CGB]>;
 
 def GR16 : RegisterClass<"MSP430", [i16], 16,
    // Volatile registers
@@ -99,21 +81,5 @@ def GR16 : RegisterClass<"MSP430", [i16], 16,
    PCW, SPW, SRW, CGW]>
 {
   let SubRegClasses = [(GR8 subreg_8bit)];
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GR16Class::iterator
-    GR16Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      // Depending on whether the function uses frame pointer or not, last 5 or 4
-      // registers on the list above are reserved
-      if (TFI->hasFP(MF))
-        return end()-5;
-      else
-        return end()-4;
-    }
-  }];
 }
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index f3f7272730f..b0984afbebe 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -127,6 +127,8 @@ getReservedRegs(const MachineFunction &MF) const {
   Reserved.set(Mips::SP);
   Reserved.set(Mips::FP);
   Reserved.set(Mips::RA);
+  Reserved.set(Mips::F31);
+  Reserved.set(Mips::D15);
 
   // SRV4 requires that odd register can't be used.
   if (!Subtarget.isSingleFloat() && !Subtarget.isMips32())
diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td
index 3134f9974f9..e97d4505eb4 100644
--- a/lib/Target/Mips/MipsRegisterInfo.td
+++ b/lib/Target/Mips/MipsRegisterInfo.td
@@ -165,19 +165,7 @@ def CPURegs : RegisterClass<"Mips", [i32], 32,
   // Callee save
   S0, S1, S2, S3, S4, S5, S6, S7,
   // Reserved
-  ZERO, AT, K0, K1, GP, SP, FP, RA]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    CPURegsClass::iterator
-    CPURegsClass::allocation_order_end(const MachineFunction &MF) const {
-      // The last 8 registers on the list above are reserved
-      return end()-8;
-    }
-  }];
-}
+  ZERO, AT, K0, K1, GP, SP, FP, RA]>;
 
 // 64bit fp:
 // * FGR64 - 32 64-bit registers
@@ -194,52 +182,7 @@ def FGR32 : RegisterClass<"Mips", [f32], 32,
   // Callee save
   F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30,
   // Reserved
-  F31]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-
-    static const unsigned MIPS_FGR32[] = {
-      Mips::F0,  Mips::F1,  Mips::F2,  Mips::F3,  Mips::F12, Mips::F13,
-      Mips::F14, Mips::F15, Mips::F4,  Mips::F5,  Mips::F6,  Mips::F7,
-      Mips::F8,  Mips::F9,  Mips::F10, Mips::F11, Mips::F16, Mips::F17,
-      Mips::F18, Mips::F19, Mips::F20, Mips::F21, Mips::F22, Mips::F23,
-      Mips::F24, Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29,
-      Mips::F30
-    };
-
-    static const unsigned MIPS_SVR4_FGR32[] = {
-      Mips::F0,  Mips::F2,  Mips::F12, Mips::F14, Mips::F4,
-      Mips::F6,  Mips::F8,  Mips::F10, Mips::F16, Mips::F18,
-      Mips::F20, Mips::F22, Mips::F24, Mips::F26, Mips::F28, Mips::F30,
-    };
-
-    FGR32Class::iterator
-    FGR32Class::allocation_order_begin(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
-
-      if (Subtarget.isMips32() || Subtarget.isSingleFloat())
-        return MIPS_FGR32;
-      else
-        return MIPS_SVR4_FGR32;
-    }
-
-    FGR32Class::iterator
-    FGR32Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
-
-      if (Subtarget.isMips32() || Subtarget.isSingleFloat())
-        return MIPS_FGR32 + (sizeof(MIPS_FGR32) / sizeof(unsigned));
-      else
-        return MIPS_SVR4_FGR32 + (sizeof(MIPS_SVR4_FGR32) / sizeof(unsigned));
-    }
-  }];
-}
+  F31]>;
 
 def AFGR64 : RegisterClass<"Mips", [f64], 64,
   // Return Values and Arguments
@@ -249,19 +192,8 @@ def AFGR64 : RegisterClass<"Mips", [f64], 64,
   // Callee save
   D10, D11, D12, D13, D14,
   // Reserved
-  D15]>
-{
+  D15]> {
   let SubRegClasses = [(FGR32 sub_fpeven, sub_fpodd)];
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    AFGR64Class::iterator
-    AFGR64Class::allocation_order_end(const MachineFunction &MF) const {
-      // The last register on the list above is reserved
-      return end()-1;
-    }
-  }];
 }
 
 // Condition Register for floating point operations
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td
index b09e86a2bc9..3c0190199a8 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -279,68 +279,12 @@ def RM: SPR<512, "**ROUNDING MODE**">;
 
 def GPRC : RegisterClass<"PPC", [i32], 32,
      [R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12,
      R30, R29, R28, R27, R26, R25, R24, R23, R22, R21, R20, R19, R18, R17,
-     R16, R15, R14, R13, R31, R0, R1, LR]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GPRCClass::iterator
-    GPRCClass::allocation_order_begin(const MachineFunction &MF) const {
-      // 32-bit SVR4 ABI: r2 is reserved for the OS.
-      // 64-bit SVR4 ABI: r2 is reserved for the TOC pointer.
-      // Darwin: R2 is reserved for CR save/restore sequence.
-      return begin()+1;
-    }
-    GPRCClass::iterator
-    GPRCClass::allocation_order_end(const MachineFunction &MF) const {
-      // On PPC64, r13 is the thread pointer. Never allocate this register.
-      // Note that this is overconservative, as it also prevents allocation of
-      // R31 when the FP is not needed.
-      // When using the 32-bit SVR4 ABI, r13 is reserved for the Small Data Area
-      // pointer.
-      const PPCSubtarget &Subtarget = MF.getTarget().getSubtarget<PPCSubtarget>();
-      const PPCFrameLowering *PPCFI =
-        static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
-
-      if (Subtarget.isPPC64() || Subtarget.isSVR4ABI())
-        return end()-5;  // don't allocate R13, R31, R0, R1, LR
-
-      if (PPCFI->needsFP(MF))
-        return end()-4;  // don't allocate R31, R0, R1, LR
-      else
-        return end()-3;  // don't allocate R0, R1, LR
-    }
-  }];
-}
+     R16, R15, R14, R13, R31, R0, R1, LR]>;
+
 def G8RC : RegisterClass<"PPC", [i64], 64,
      [X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12,
      X30, X29, X28, X27, X26, X25, X24, X23, X22, X21, X20, X19, X18, X17,
-     X16, X15, X14, X31, X13, X0, X1, LR8]>
-{
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    G8RCClass::iterator
-    G8RCClass::allocation_order_begin(const MachineFunction &MF) const {
-      // 64-bit SVR4 ABI: r2 is reserved for the TOC pointer.
-      // Darwin: r2 is reserved for CR save/restore sequence.
-      return begin()+1;
-    }
-    G8RCClass::iterator
-    G8RCClass::allocation_order_end(const MachineFunction &MF) const {
-      const PPCFrameLowering *PPCFI =
-        static_cast<const PPCFrameLowering*>(MF.getTarget().getFrameLowering());
-      if (PPCFI->needsFP(MF))
-        return end()-5;
-      else
-        return end()-4;
-    }
-  }];
-}
+     X16, X15, X14, X31, X13, X0, X1, LR8]>;
 
 // Allocate volatiles first, then non-volatiles in reverse order. With the SVR4
 // ABI the size of the Floating-point register save area is determined by the
diff --git a/lib/Target/Sparc/SparcRegisterInfo.cpp b/lib/Target/Sparc/SparcRegisterInfo.cpp
index fdd66ddc7de..9fcf028fa60 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -39,6 +39,8 @@ const unsigned* SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
 BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
+  // FIXME: G1 reserved for now for large imm generation by frame code.
+  Reserved.set(SP::G1);
   Reserved.set(SP::G2);
   Reserved.set(SP::G3);
   Reserved.set(SP::G4);
diff --git a/lib/Target/Sparc/SparcRegisterInfo.td b/lib/Target/Sparc/SparcRegisterInfo.td
index c1ef2e7e288..0729818e85b 100644
--- a/lib/Target/Sparc/SparcRegisterInfo.td
+++ b/lib/Target/Sparc/SparcRegisterInfo.td
@@ -142,8 +142,6 @@ def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
 def IntRegs : RegisterClass<"SP", [i32], 32, [L0, L1, L2, L3, L4, L5, L6, L7,
                                               I0, I1, I2, I3, I4, I5,
                                               O0, O1, O2, O3, O4, O5, O7,
-
-  // FIXME: G1 reserved for now for large imm generation by frame code.
   G1,
   // Non-allocatable regs:
   G2, G3, G4, // FIXME: OK for use only in
@@ -153,19 +151,7 @@ def IntRegs : RegisterClass<"SP", [i32], 32, [L0, L1, L2, L3, L4, L5, L6, L7,
   I7,  // return address
   G0,  // constant zero
   G5, G6, G7 // reserved for kernel
-  ]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    IntRegsClass::iterator
-    IntRegsClass::allocation_order_end(const MachineFunction &MF) const {
-      // FIXME: These special regs should be taken out of the regclass!
-      return end()-10  // Don't allocate special registers
-        -1;  // FIXME: G1 reserved for large imm generation by frame code.
-    }
-  }];
-}
+  ]>;
 
 def FPRegs : RegisterClass<"SP", [f32], 32, [F0, F1, F2, F3, F4, F5, F6, F7, F8,
                                              F9, F10, F11, F12, F13, F14, F15,
                                              F16, F17, F18, F19, F20, F21, F22,
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 6f67101e7ef..1ad6203af2f 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -515,6 +515,25 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   Reserved.set(X86::FS);
   Reserved.set(X86::GS);
 
+  // Reserve the registers that only exist in 64-bit mode.
+  if (!Is64Bit) {
+    for (unsigned n = 0; n != 8; ++n) {
+      const unsigned GPR64[] = {
+        X86::R8,  X86::R9,  X86::R10, X86::R11,
+        X86::R12, X86::R13, X86::R14, X86::R15
+      };
+      for (const unsigned *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI;
+           ++AI)
+        Reserved.set(Reg);
+
+      // XMM8, XMM9, ...
+      assert(X86::XMM15 == X86::XMM8+7);
+      for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
+           ++AI)
+        Reserved.set(Reg);
+    }
+  }
+
   return Reserved;
 }
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 2c3cbfa4592..f1d149c3fbc 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -326,104 +326,12 @@ def GR16 : RegisterClass<"X86", [i16], 16,
                          [AX, CX, DX, SI, DI, BX, BP, SP,
                           R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W]> {
   let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi)];
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    static const unsigned X86_GR16_AO_64[] = {
-      X86::AX,  X86::CX,  X86::DX,  X86::SI,  X86::DI,
-      X86::R8W, X86::R9W, X86::R10W, X86::R11W,
-      X86::BX, X86::R14W, X86::R15W, X86::R12W, X86::R13W, X86::BP
-    };
-
-    GR16Class::iterator
-    GR16Class::allocation_order_begin(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (Subtarget.is64Bit())
-        return X86_GR16_AO_64;
-      else
-        return begin();
-    }
-
-    GR16Class::iterator
-    GR16Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
-      if (Subtarget.is64Bit()) {
-        // Does the function dedicate RBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate SP or BP.
-          return array_endof(X86_GR16_AO_64) - 1;
-        else
-          // If not, just don't allocate SP.
-          return array_endof(X86_GR16_AO_64);
-      } else {
-        // Does the function dedicate EBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate SP or BP.
-          return begin() + 6;
-        else
-          // If not, just don't allocate SP.
-          return begin() + 7;
-      }
-    }
-  }];
 }
 
 def GR32 : RegisterClass<"X86", [i32], 32,
                          [EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
                           R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
   let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    static const unsigned X86_GR32_AO_64[] = {
-      X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI,
-      X86::R8D, X86::R9D, X86::R10D, X86::R11D,
-      X86::EBX, X86::R14D, X86::R15D, X86::R12D, X86::R13D, X86::EBP
-    };
-
-    GR32Class::iterator
-    GR32Class::allocation_order_begin(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (Subtarget.is64Bit())
-        return X86_GR32_AO_64;
-      else
-        return begin();
-    }
-
-    GR32Class::iterator
-    GR32Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
-      if (Subtarget.is64Bit()) {
-        // Does the function dedicate RBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate ESP or EBP.
-          return array_endof(X86_GR32_AO_64) - 1;
-        else
-          // If not, just don't allocate ESP.
-          return array_endof(X86_GR32_AO_64);
-      } else {
-        // Does the function dedicate EBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate ESP or EBP.
-          return begin() + 6;
-        else
-          // If not, just don't allocate ESP.
-          return begin() + 7;
-      }
-    }
-  }];
 }
 
 // GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
@@ -544,52 +452,6 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32,
                               [EAX, ECX, EDX, ESI, EDI, EBX, EBP,
                                R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
   let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    static const unsigned X86_GR32_NOSP_AO_64[] = {
-      X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI,
-      X86::R8D, X86::R9D, X86::R10D, X86::R11D,
-      X86::EBX, X86::R14D, X86::R15D, X86::R12D, X86::R13D, X86::EBP
-    };
-
-    GR32_NOSPClass::iterator
-    GR32_NOSPClass::allocation_order_begin(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (Subtarget.is64Bit())
-        return X86_GR32_NOSP_AO_64;
-      else
-        return begin();
-    }
-
-    GR32_NOSPClass::iterator
-    GR32_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
-      if (Subtarget.is64Bit()) {
-        // Does the function dedicate RBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate EBP.
-          return array_endof(X86_GR32_NOSP_AO_64) - 1;
-        else
-          // If not, any reg in this class is ok.
-          return array_endof(X86_GR32_NOSP_AO_64);
-      } else {
-        // Does the function dedicate EBP to being a frame ptr?
-        if (TFI->hasFP(MF) || MFI->getReserveFP())
-          // If so, don't allocate EBP.
-          return begin() + 6;
-        else
-          // If not, any reg in this class is ok.
-          return begin() + 7;
-      }
-    }
-  }];
 }
 
 // GR64_NOSP - GR64 registers except RSP (and RIP).
@@ -628,42 +490,12 @@ def GR32_AD : RegisterClass<"X86", [i32], 32, [EAX, EDX]> {
 def FR32 : RegisterClass<"X86", [f32], 32,
                          [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
                           XMM8, XMM9, XMM10, XMM11,
-                          XMM12, XMM13, XMM14, XMM15]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    FR32Class::iterator
-    FR32Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (!Subtarget.is64Bit())
-        return end()-8; // Only XMM0 to XMM7 are available in 32-bit mode.
-      else
-        return end();
-    }
-  }];
-}
+                          XMM12, XMM13, XMM14, XMM15]>;
 
 def FR64 : RegisterClass<"X86", [f64], 64,
                          [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
                           XMM8, XMM9, XMM10, XMM11,
-                          XMM12, XMM13, XMM14, XMM15]> {
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    FR64Class::iterator
-    FR64Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (!Subtarget.is64Bit())
-        return end()-8; // Only XMM0 to XMM7 are available in 32-bit mode.
-      else
-        return end();
-    }
-  }];
-}
+                          XMM12, XMM13, XMM14, XMM15]>;
 
 // FIXME: This sets up the floating point register files as though they are f64
@@ -692,21 +524,6 @@ def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
                           XMM8, XMM9, XMM10, XMM11,
                           XMM12, XMM13, XMM14, XMM15]> {
   let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd)];
-
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    VR128Class::iterator
-    VR128Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (!Subtarget.is64Bit())
-        return end()-8; // Only XMM0 to XMM7 are available in 32-bit mode.
-      else
-        return end();
-    }
-  }];
 }
 
 def VR256 : RegisterClass<"X86", [v32i8, v8i32, v4i64, v8f32, v4f64], 256,
@@ -714,21 +531,6 @@ def VR256 : RegisterClass<"X86", [v32i8, v8i32, v4i64, v8f32, v4f64], 256,
                           YMM8, YMM9, YMM10, YMM11,
                           YMM12, YMM13, YMM14, YMM15]> {
   let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd), (VR128 sub_xmm)];
-
-  let MethodProtos = [{
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    VR256Class::iterator
-    VR256Class::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
-      if (!Subtarget.is64Bit())
-        return end()-8; // Only YMM0 to YMM7 are available in 32-bit mode.
-      else
-        return end();
-    }
-  }];
 }
 
 // Status flags registers.
diff --git a/lib/Target/XCore/XCoreRegisterInfo.td b/lib/Target/XCore/XCoreRegisterInfo.td
index b1ba9640691..09510976dd0 100644
--- a/lib/Target/XCore/XCoreRegisterInfo.td
+++ b/lib/Target/XCore/XCoreRegisterInfo.td
@@ -48,27 +48,7 @@ def GRRegs : RegisterClass<"XCore", [i32], 32,
   // Not preserved across procedure calls
   R11,
   // Callee save
-  R4, R5, R6, R7, R8, R9, R10]> {
-  let MethodProtos = [{
-    iterator allocation_order_begin(const MachineFunction &MF) const;
-    iterator allocation_order_end(const MachineFunction &MF) const;
-  }];
-  let MethodBodies = [{
-    GRRegsClass::iterator
-    GRRegsClass::allocation_order_begin(const MachineFunction &MF) const {
-      return begin();
-    }
-    GRRegsClass::iterator
-    GRRegsClass::allocation_order_end(const MachineFunction &MF) const {
-      const TargetMachine &TM = MF.getTarget();
-      const TargetFrameLowering *TFI = TM.getFrameLowering();
-      if (TFI->hasFP(MF))
-        return end()-1;  // don't allocate R10
-      else
-        return end();
-    }
-  }];
-}
+  R4, R5, R6, R7, R8, R9, R10]>;
 
 // Reserved
 def RRegs : RegisterClass<"XCore", [i32], 32, [CP, DP, SP, LR]> {