ARM VSHR implied destination operand form aliases.
llvm-svn: 146192
commit a33fa8aa88
parent d8a73b8918
@@ -201,21 +201,29 @@ def msr_mask : Operand<i32> {
//  16  imm6<5:4> = '01', 16 - <imm> is encoded in imm6<3:0>
//  32  imm6<5>   = '1',  32 - <imm> is encoded in imm6<4:0>
//  64  64 - <imm> is encoded in imm6<5:0>
def shr_imm8_asm_operand : ImmAsmOperand { let Name = "ShrImm8"; }
def shr_imm8 : Operand<i32> {
  let EncoderMethod = "getShiftRight8Imm";
  let DecoderMethod = "DecodeShiftRight8Imm";
  let ParserMatchClass = shr_imm8_asm_operand;
}
def shr_imm16_asm_operand : ImmAsmOperand { let Name = "ShrImm16"; }
def shr_imm16 : Operand<i32> {
  let EncoderMethod = "getShiftRight16Imm";
  let DecoderMethod = "DecodeShiftRight16Imm";
  let ParserMatchClass = shr_imm16_asm_operand;
}
def shr_imm32_asm_operand : ImmAsmOperand { let Name = "ShrImm32"; }
def shr_imm32 : Operand<i32> {
  let EncoderMethod = "getShiftRight32Imm";
  let DecoderMethod = "DecodeShiftRight32Imm";
  let ParserMatchClass = shr_imm32_asm_operand;
}
def shr_imm64_asm_operand : ImmAsmOperand { let Name = "ShrImm64"; }
def shr_imm64 : Operand<i32> {
  let EncoderMethod = "getShiftRight64Imm";
  let DecoderMethod = "DecodeShiftRight64Imm";
  let ParserMatchClass = shr_imm64_asm_operand;
}

//===----------------------------------------------------------------------===//
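For illustration, the inverted-immediate scheme the comments above describe can be sketched as a standalone function. The name encodeShrImm6 is hypothetical; the in-tree encoders are the getShiftRight*Imm methods the operand definitions name.

#include <cassert>
#include <cstdint>

// Sketch only: form the imm6 field for a NEON shift-right immediate,
// following the encoding table in the comments above.
static uint32_t encodeShrImm6(unsigned ElemBits, unsigned Imm) {
  assert(Imm > 0 && Imm <= ElemBits && "shift amount out of range");
  switch (ElemBits) {
  case 16: return 0x10 | (16 - Imm);  // imm6<5:4> = '01', 16 - Imm in imm6<3:0>
  case 32: return 0x20 | (32 - Imm);  // imm6<5> = '1', 32 - Imm in imm6<4:0>
  case 64: return (64 - Imm) & 0x3f;  // 64 - Imm in imm6<5:0>
  default: assert(false && "element size not covered here"); return 0;
  }
}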
@@ -5533,6 +5533,43 @@ def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
                    (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;

// VSHR (immediate) two-operand aliases.
def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
                    (VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
                    (VSHRsv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
                    (VSHRsv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
                    (VSHRsv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;

def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
                    (VSHRsv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
                    (VSHRsv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s32 $Vdn, $imm",
                    (VSHRsv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s64 $Vdn, $imm",
                    (VSHRsv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;

def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
                    (VSHRuv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
                    (VSHRuv4i16 DPR:$Vdn, DPR:$Vdn, shr_imm16:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
                    (VSHRuv2i32 DPR:$Vdn, DPR:$Vdn, shr_imm32:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
                    (VSHRuv1i64 DPR:$Vdn, DPR:$Vdn, shr_imm64:$imm, pred:$p)>;

def : NEONInstAlias<"vshr${p}.u8 $Vdn, $imm",
                    (VSHRuv16i8 QPR:$Vdn, QPR:$Vdn, shr_imm8:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u16 $Vdn, $imm",
                    (VSHRuv8i16 QPR:$Vdn, QPR:$Vdn, shr_imm16:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
                    (VSHRuv4i32 QPR:$Vdn, QPR:$Vdn, shr_imm32:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
                    (VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
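The net effect of each alias is that the assembler accepts a two-operand spelling and reuses $Vdn as both the destination and the first source. A rough sketch of that rewrite, purely illustrative (TableGen's alias matching does not literally edit a token list):

#include <string>
#include <vector>

// Illustrative only: duplicate the destination register so the two-operand
// form matches the real three-operand VSHR instruction.
static void expandTwoOperandForm(std::vector<std::string> &Ops) {
  // {"vshr.s8", "d16", "#7"} -> {"vshr.s8", "d16", "d16", "#7"}
  if (Ops.size() == 3)
    Ops.insert(Ops.begin() + 2, Ops[1]);
}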
// VLD1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
defm VLD1LNdAsm : NEONDT8AsmPseudoInst<"vld1${p}", "$list, $addr",
@@ -643,6 +643,38 @@ public:
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  bool isShrImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
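These four predicates differ only in the upper bound, so the shared check could be factored as sketched below. This is hypothetical: the parser keeps separate named methods because each ImmAsmOperand's generated PredicateMethod refers to one by name.

#include <cstdint>

// Hypothetical consolidation of isShrImm8/16/32/64: a shift-right immediate
// for an N-bit element must be a constant in the range (0, N].
static bool isShrImmInRange(int64_t Value, int64_t ElemBits) {
  return Value > 0 && Value <= ElemBits;
}

// isShrImm32() would then reduce to:
//   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
//   return CE && isShrImmInRange(CE->getValue(), 32);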
  bool isImm1_7() const {
    if (Kind != k_Immediate)
      return false;
@@ -70,6 +70,41 @@ _foo:
@ CHECK: vshr.s32 q8, q8, #31     @ encoding: [0x70,0x00,0xe1,0xf2]
@ CHECK: vshr.s64 q8, q8, #63     @ encoding: [0xf0,0x00,0xc1,0xf2]

@ implied destination operand variants.
        vshr.u8 d16, #7
        vshr.u16 d16, #15
        vshr.u32 d16, #31
        vshr.u64 d16, #63
        vshr.u8 q8, #7
        vshr.u16 q8, #15
        vshr.u32 q8, #31
        vshr.u64 q8, #63
        vshr.s8 d16, #7
        vshr.s16 d16, #15
        vshr.s32 d16, #31
        vshr.s64 d16, #63
        vshr.s8 q8, #7
        vshr.s16 q8, #15
        vshr.s32 q8, #31
        vshr.s64 q8, #63

@ CHECK: vshr.u8  d16, d16, #7    @ encoding: [0x30,0x00,0xc9,0xf3]
@ CHECK: vshr.u16 d16, d16, #15   @ encoding: [0x30,0x00,0xd1,0xf3]
@ CHECK: vshr.u32 d16, d16, #31   @ encoding: [0x30,0x00,0xe1,0xf3]
@ CHECK: vshr.u64 d16, d16, #63   @ encoding: [0xb0,0x00,0xc1,0xf3]
@ CHECK: vshr.u8  q8, q8, #7      @ encoding: [0x70,0x00,0xc9,0xf3]
@ CHECK: vshr.u16 q8, q8, #15     @ encoding: [0x70,0x00,0xd1,0xf3]
@ CHECK: vshr.u32 q8, q8, #31     @ encoding: [0x70,0x00,0xe1,0xf3]
@ CHECK: vshr.u64 q8, q8, #63     @ encoding: [0xf0,0x00,0xc1,0xf3]
@ CHECK: vshr.s8  d16, d16, #7    @ encoding: [0x30,0x00,0xc9,0xf2]
@ CHECK: vshr.s16 d16, d16, #15   @ encoding: [0x30,0x00,0xd1,0xf2]
@ CHECK: vshr.s32 d16, d16, #31   @ encoding: [0x30,0x00,0xe1,0xf2]
@ CHECK: vshr.s64 d16, d16, #63   @ encoding: [0xb0,0x00,0xc1,0xf2]
@ CHECK: vshr.s8  q8, q8, #7      @ encoding: [0x70,0x00,0xc9,0xf2]
@ CHECK: vshr.s16 q8, q8, #15     @ encoding: [0x70,0x00,0xd1,0xf2]
@ CHECK: vshr.s32 q8, q8, #31     @ encoding: [0x70,0x00,0xe1,0xf2]
@ CHECK: vshr.s64 q8, q8, #63     @ encoding: [0xf0,0x00,0xc1,0xf2]

@ CHECK: vsra.u8  d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf3]
        vsra.u8 d16, d16, #7
@ CHECK: vsra.u16 d16, d16, #15   @ encoding: [0x30,0x01,0xd1,0xf3]