
Use uint64_t as the type for the X86 TSFlag format enum. Allows removal of the VEXShift hack that was used to access the higher bits of TSFlags.

llvm-svn: 221673
Craig Topper 2014-11-11 07:32:32 +00:00
parent cd941e090d
commit b267ba2986
2 changed files with 62 additions and 61 deletions
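For context, here is a minimal standalone sketch of the access pattern this commit enables. The Demo namespace and the bit position are made up for illustration; only the "enum : uint64_t" idiom and the direct mask test mirror the diff below.

#include <cstdint>
#include <cassert>

namespace Demo {
// With an explicit 64-bit underlying type, enumerators can sit above bit 31
// and be pre-shifted to their absolute position, so callers no longer need
// the old "(TSFlags >> VEXShift) & Flag" two-step.
enum : uint64_t {
  VEX_WShift = 40,                 // hypothetical bit position
  VEX_W = 1ULL << VEX_WShift,
};
} // namespace Demo

int main() {
  uint64_t TSFlags = Demo::VEX_W;          // pretend this came from the instruction table
  bool HasVEX_W = TSFlags & Demo::VEX_W;   // new style: a single mask test
  assert(HasVEX_W);
  return 0;
}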

lib/Target/X86/MCTargetDesc/X86BaseInfo.h

@@ -216,7 +216,7 @@ namespace X86II {
MO_SECREL
};
enum {
enum : uint64_t {
//===------------------------------------------------------------------===//
// Instruction encodings. These are the standard/most common forms for X86
// instructions.
@@ -328,8 +328,8 @@ namespace X86II {
OpSizeShift = 7,
OpSizeMask = 0x3 << OpSizeShift,
OpSize16 = 1,
OpSize32 = 2,
OpSize16 = 1 << OpSizeShift,
OpSize32 = 2 << OpSizeShift,
// AsSize - Set if this instruction requires an operand size prefix (0x67),
// which most often indicates that the instruction address 16 bit address
@@ -455,51 +455,53 @@ namespace X86II {
EncodingMask = 0x3 << EncodingShift,
// VEX - encoding using 0xC4/0xC5
VEX = 1,
VEX = 1 << EncodingShift,
/// XOP - Opcode prefix used by XOP instructions.
XOP = 2,
XOP = 2 << EncodingShift,
// VEX_EVEX - Specifies that this instruction use EVEX form which provides
// syntax support up to 32 512-bit register operands and up to 7 16-bit
// mask operands as well as source operand data swizzling/memory operand
// conversion, eviction hint, and rounding mode.
EVEX = 3,
EVEX = 3 << EncodingShift,
// Opcode
OpcodeShift = EncodingShift + 2,
//===------------------------------------------------------------------===//
/// VEX - The opcode prefix used by AVX instructions
VEXShift = OpcodeShift + 8,
/// VEX_W - Has a opcode specific functionality, but is used in the same
/// way as REX_W is for regular SSE instructions.
VEX_W = 1U << 0,
VEX_WShift = OpcodeShift + 8,
VEX_W = 1ULL << VEX_WShift,
/// VEX_4V - Used to specify an additional AVX/SSE register. Several 2
/// address instructions in SSE are represented as 3 address ones in AVX
/// and the additional register is encoded in VEX_VVVV prefix.
VEX_4V = 1U << 1,
VEX_4VShift = VEX_WShift + 1,
VEX_4V = 1ULL << VEX_4VShift,
/// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode
/// operand 3 with VEX.vvvv.
VEX_4VOp3 = 1U << 2,
VEX_4VOp3Shift = VEX_4VShift + 1,
VEX_4VOp3 = 1ULL << VEX_4VOp3Shift,
/// VEX_I8IMM - Specifies that the last register used in a AVX instruction,
/// must be encoded in the i8 immediate field. This usually happens in
/// instructions with 4 operands.
VEX_I8IMM = 1U << 3,
VEX_I8IMMShift = VEX_4VOp3Shift + 1,
VEX_I8IMM = 1ULL << VEX_I8IMMShift,
/// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
/// instruction uses 256-bit wide registers. This is usually auto detected
/// if a VR256 register is used, but some AVX instructions also have this
/// field marked when using a f256 memory references.
VEX_L = 1U << 4,
VEX_LShift = VEX_I8IMMShift + 1,
VEX_L = 1ULL << VEX_LShift,
// VEX_LIG - Specifies that this instruction ignores the L-bit in the VEX
// prefix. Usually used for scalar instructions. Needed by disassembler.
VEX_LIG = 1U << 5,
VEX_LIGShift = VEX_LShift + 1,
VEX_LIG = 1ULL << VEX_LIGShift,
// TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit field
// with following encoding:
@@ -510,20 +512,24 @@ namespace X86II {
// this will save 1 tsflag bit
// EVEX_K - Set if this instruction requires masking
EVEX_K = 1U << 6,
EVEX_KShift = VEX_LIGShift + 1,
EVEX_K = 1ULL << EVEX_KShift,
// EVEX_Z - Set if this instruction has EVEX.Z field set.
EVEX_Z = 1U << 7,
EVEX_ZShift = EVEX_KShift + 1,
EVEX_Z = 1ULL << EVEX_ZShift,
// EVEX_L2 - Set if this instruction has EVEX.L' field set.
EVEX_L2 = 1U << 8,
EVEX_L2Shift = EVEX_ZShift + 1,
EVEX_L2 = 1ULL << EVEX_L2Shift,
// EVEX_B - Set if this instruction has EVEX.B field set.
EVEX_B = 1U << 9,
EVEX_BShift = EVEX_L2Shift + 1,
EVEX_B = 1ULL << EVEX_BShift,
// The scaling factor for the AVX512's 8-bit compressed displacement.
CD8_Scale_Shift = VEXShift + 10,
CD8_Scale_Mask = 127,
CD8_Scale_Shift = EVEX_BShift + 1,
CD8_Scale_Mask = 127ULL << CD8_Scale_Shift,
/// Has3DNow0F0FOpcode - This flag indicates that the instruction uses the
/// wacky 0x0F 0x0F prefix for 3DNow! instructions. The manual documents
@@ -532,16 +538,16 @@ namespace X86II {
/// we handle this by storeing the classifier in the opcode field and using
/// this flag to indicate that the encoder should do the wacky 3DNow! thing.
Has3DNow0F0FOpcodeShift = CD8_Scale_Shift + 7,
Has3DNow0F0FOpcode = 1U << (Has3DNow0F0FOpcodeShift - VEXShift),
Has3DNow0F0FOpcode = 1ULL << Has3DNow0F0FOpcodeShift,
/// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in
/// ModRM or I8IMM. This is used for FMA4 and XOP instructions.
MemOp4Shift = Has3DNow0F0FOpcodeShift + 1,
MemOp4 = 1U << (MemOp4Shift - VEXShift),
MemOp4 = 1ULL << MemOp4Shift,
/// Explicitly specified rounding control
EVEX_RCShift = MemOp4Shift + 1,
EVEX_RC = 1U << (EVEX_RCShift - VEXShift)
EVEX_RC = 1ULL << EVEX_RCShift
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
@@ -643,9 +649,9 @@ namespace X86II {
/// counted as one operand.
///
inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
switch (TSFlags & X86II::FormMask) {
default: llvm_unreachable("Unknown FormMask value in getMemoryOperandNo!");
@@ -687,7 +693,7 @@ namespace X86II {
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
unsigned FirstMemOp = 0;
if (HasVEX_4V)
++FirstMemOp;// Skip the register dest (which is encoded in VEX_VVVV).
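The same convention carries over to multi-bit fields in the second file below: the mask constant is now pre-shifted, so callers mask first and then shift down (mirroring the isCDisp8 change). A hedged sketch with a made-up bit position:

#include <cstdint>
#include <cassert>

namespace Demo {
enum : uint64_t {
  CD8_Scale_Shift = 48,                         // hypothetical bit position
  CD8_Scale_Mask = 127ULL << CD8_Scale_Shift,   // mask already shifted into place
};
} // namespace Demo

int main() {
  uint64_t TSFlags = 16ULL << Demo::CD8_Scale_Shift;   // encode a scale of 16
  unsigned CD8_Scale =
      (TSFlags & Demo::CD8_Scale_Mask) >> Demo::CD8_Scale_Shift;  // mask, then shift
  assert(CD8_Scale == 16);
  return 0;
}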

lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp

@@ -185,12 +185,11 @@ static bool isDisp8(int Value) {
/// isCDisp8 - Return true if this signed displacement fits in a 8-bit
/// compressed dispacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
assert(((TSFlags & X86II::EncodingMask) >>
X86II::EncodingShift == X86II::EVEX) &&
assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
"Compressed 8-bit displacement is only valid for EVEX inst.");
unsigned CD8_Scale =
(TSFlags >> X86II::CD8_Scale_Shift) & X86II::CD8_Scale_Mask;
(TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
if (CD8_Scale == 0) {
CValue = Value;
return isDisp8(Value);
@@ -373,9 +372,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
const MCOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
unsigned BaseReg = Base.getReg();
unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
X86II::EncodingShift;
bool HasEVEX = (Encoding == X86II::EVEX);
bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
// Handle %rip relative addressing.
if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
@@ -593,13 +590,12 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
raw_ostream &OS) const {
unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
X86II::EncodingShift;
bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
bool HasEVEX_RC = (TSFlags >> X86II::VEXShift) & X86II::EVEX_RC;
uint64_t Encoding = TSFlags & X86II::EncodingMask;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
// VEX_R: opcode externsion equivalent to REX.R in
// 1's complement (inverted) form
@@ -680,18 +676,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
bool EncodeRC = false;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
if (TSFlags & X86II::VEX_W)
VEX_W = 1;
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
if (TSFlags & X86II::VEX_L)
VEX_L = 1;
if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2))
if (TSFlags & X86II::EVEX_L2)
EVEX_L2 = 1;
if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z))
if (HasEVEX_K && (TSFlags & X86II::EVEX_Z))
EVEX_z = 1;
if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_B))
if ((TSFlags & X86II::EVEX_B))
EVEX_b = 1;
switch (TSFlags & X86II::OpPrefixMask) {
@@ -1109,8 +1105,8 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
raw_ostream &OS) const {
// Emit the operand size opcode prefix as needed.
unsigned char OpSize = (TSFlags & X86II::OpSizeMask) >> X86II::OpSizeShift;
if (OpSize == (is16BitMode(STI) ? X86II::OpSize32 : X86II::OpSize16))
if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
: X86II::OpSize16))
EmitByte(0x66, CurByte, OS);
switch (TSFlags & X86II::OpPrefixMask) {
@@ -1170,18 +1166,17 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned CurByte = 0;
// Encoding type for this instruction.
unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
X86II::EncodingShift;
uint64_t Encoding = TSFlags & X86II::EncodingMask;
// It uses the VEX.VVVV field?
bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
const unsigned MemOp4_I8IMMOperand = 2;
// It uses the EVEX.aaa field?
bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K);
bool HasEVEX_RC = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC);
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
// Determine where the memory operand starts, if present.
int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
@@ -1237,7 +1232,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
if (TSFlags & X86II::Has3DNow0F0FOpcode)
BaseOpcode = 0x0F; // Weird 3DNow! encoding.
unsigned SrcRegNum = 0;
@@ -1521,7 +1516,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
while (CurOp != NumOps && NumOps - CurOp <= 2) {
// The last source register of a 4 operand instruction in AVX is encoded
// in bits[7:4] of a immediate byte.
if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
if (TSFlags & X86II::VEX_I8IMM) {
const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
: CurOp);
++CurOp;
@@ -1547,7 +1542,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
}
}
if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
if (TSFlags & X86II::Has3DNow0F0FOpcode)
EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
#ifndef NDEBUG