Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 18:54:02 +01:00)
[Hexagon] Switch to parameterized register classes for HVX
This removes the duplicate HVX instruction set for the 128-byte mode. A single instruction set now works for both modes (64- and 128-byte).

llvm-svn: 313362
parent 9739f59304
commit 325bb38667
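The mechanism behind this change is TableGen's HwMode facility: a register class can carry per-mode register sizes and value types, so the 64-byte and 128-byte HVX configurations share one class and one instruction set. Below is a minimal sketch of the idea, assuming the Hvx64/Hvx128 modes defined in this patch; the exact definitions in HexagonRegisterInfo.td may differ.

// Two hardware modes keyed to subtarget features (defined in this patch).
def Hvx64  : HwMode<"+hvx,-hvx-double">;
def Hvx128 : HwMode<"+hvx,+hvx-double">;

// A value type whose concrete MVT depends on the active mode.
def VecI8 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
                              [v64i8, v128i8, v64i8]>;

// One register class serving both modes: RegInfoByHwMode resolves the
// register/spill size per mode (512 bits for 64-byte HVX, 1024 bits for
// 128-byte HVX), so instructions no longer need _128B twins.
def HvxVR : RegisterClass<"Hexagon", [VecI8], 512,
                          (add (sequence "V%u", 0, 31))> {
  let RegInfos = RegInfoByHwMode<
      [Hvx64, Hvx128],
      [RegInfo<512, 512, 512>, RegInfo<1024, 1024, 1024>]>;
}

Because RegInfos is resolved against the active subtarget, a query such as getRegSizeInBits(Hexagon::HvxVRRegClass) returns 512 or 1024 as appropriate; that is what lets the AsmPrinter hunk below compute VectorSize without branching on useHVXSglOps().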
@@ -106,7 +106,7 @@ static DecodeStatus DecodeGeneralSubRegsRegisterClass(MCInst &Inst,
 static DecodeStatus DecodeIntRegsLow8RegisterClass(MCInst &Inst, unsigned RegNo,
                                                    uint64_t Address,
                                                    const void *Decoder);
-static DecodeStatus DecodeVectorRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxVRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                   uint64_t Address,
                                                   const void *Decoder);
 static DecodeStatus DecodeDoubleRegsRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -115,13 +115,13 @@ static DecodeStatus DecodeDoubleRegsRegisterClass(MCInst &Inst, unsigned RegNo,
 static DecodeStatus
 DecodeGeneralDoubleLow8RegsRegisterClass(MCInst &Inst, unsigned RegNo,
                                          uint64_t Address, const void *Decoder);
-static DecodeStatus DecodeVecDblRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxWRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                   uint64_t Address,
                                                   const void *Decoder);
 static DecodeStatus DecodePredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
                                                 uint64_t Address,
                                                 const void *Decoder);
-static DecodeStatus DecodeVecPredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxQRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                    uint64_t Address,
                                                    const void *Decoder);
 static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -481,10 +481,10 @@ static DecodeStatus DecodeGeneralSubRegsRegisterClass(MCInst &Inst,
   return DecodeRegisterClass(Inst, RegNo, GeneralSubRegDecoderTable);
 }
 
-static DecodeStatus DecodeVectorRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxVRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                   uint64_t /*Address*/,
                                                   const void *Decoder) {
-  static const MCPhysReg VecRegDecoderTable[] = {
+  static const MCPhysReg HvxVRDecoderTable[] = {
       Hexagon::V0, Hexagon::V1, Hexagon::V2, Hexagon::V3, Hexagon::V4,
       Hexagon::V5, Hexagon::V6, Hexagon::V7, Hexagon::V8, Hexagon::V9,
       Hexagon::V10, Hexagon::V11, Hexagon::V12, Hexagon::V13, Hexagon::V14,
@@ -493,7 +493,7 @@ static DecodeStatus DecodeVectorRegsRegisterClass(MCInst &Inst, unsigned RegNo,
       Hexagon::V25, Hexagon::V26, Hexagon::V27, Hexagon::V28, Hexagon::V29,
       Hexagon::V30, Hexagon::V31};
 
-  return DecodeRegisterClass(Inst, RegNo, VecRegDecoderTable);
+  return DecodeRegisterClass(Inst, RegNo, HvxVRDecoderTable);
 }
 
 static DecodeStatus DecodeDoubleRegsRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -517,16 +517,16 @@ static DecodeStatus DecodeGeneralDoubleLow8RegsRegisterClass(
   return DecodeRegisterClass(Inst, RegNo, GeneralDoubleLow8RegDecoderTable);
 }
 
-static DecodeStatus DecodeVecDblRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxWRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                   uint64_t /*Address*/,
                                                   const void *Decoder) {
-  static const MCPhysReg VecDblRegDecoderTable[] = {
+  static const MCPhysReg HvxWRDecoderTable[] = {
      Hexagon::W0, Hexagon::W1, Hexagon::W2, Hexagon::W3,
      Hexagon::W4, Hexagon::W5, Hexagon::W6, Hexagon::W7,
      Hexagon::W8, Hexagon::W9, Hexagon::W10, Hexagon::W11,
      Hexagon::W12, Hexagon::W13, Hexagon::W14, Hexagon::W15};
 
-  return (DecodeRegisterClass(Inst, RegNo >> 1, VecDblRegDecoderTable));
+  return (DecodeRegisterClass(Inst, RegNo >> 1, HvxWRDecoderTable));
 }
 
 static DecodeStatus DecodePredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -538,13 +538,13 @@ static DecodeStatus DecodePredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
   return DecodeRegisterClass(Inst, RegNo, PredRegDecoderTable);
 }
 
-static DecodeStatus DecodeVecPredRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+static DecodeStatus DecodeHvxQRRegisterClass(MCInst &Inst, unsigned RegNo,
                                                    uint64_t /*Address*/,
                                                    const void *Decoder) {
-  static const MCPhysReg VecPredRegDecoderTable[] = {Hexagon::Q0, Hexagon::Q1,
+  static const MCPhysReg HvxQRDecoderTable[] = {Hexagon::Q0, Hexagon::Q1,
                                                      Hexagon::Q2, Hexagon::Q3};
 
-  return DecodeRegisterClass(Inst, RegNo, VecPredRegDecoderTable);
+  return DecodeRegisterClass(Inst, RegNo, HvxQRDecoderTable);
 }
 
 static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo,
@@ -44,6 +44,9 @@ def UseHVXSgl : Predicate<"HST->useHVXSglOps()">;
 def UseHVX : Predicate<"HST->useHVXSglOps() ||HST->useHVXDblOps()">,
              AssemblerPredicate<"ExtensionHVX">;
 
+def Hvx64 : HwMode<"+hvx,-hvx-double">;
+def Hvx128 : HwMode<"+hvx,+hvx-double">;
+
 //===----------------------------------------------------------------------===//
 // Classes used for relation maps.
 //===----------------------------------------------------------------------===//
@@ -281,10 +281,8 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   MCInst &MappedInst = static_cast <MCInst &>(Inst);
   const MCRegisterInfo *RI = OutStreamer->getContext().getRegisterInfo();
   const MachineFunction &MF = *MI.getParent()->getParent();
-  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
-  const auto &VecRC = HST.useHVXSglOps() ? Hexagon::VectorRegsRegClass
-                                         : Hexagon::VectorRegs128BRegClass;
-  unsigned VectorSize = HST.getRegisterInfo()->getSpillSize(VecRC);
+  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
+  unsigned VectorSize = HRI.getRegSizeInBits(Hexagon::HvxVRRegClass) / 8;
 
   switch (Inst.getOpcode()) {
   default: return;
@@ -605,8 +603,7 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
     return;
   }
 
-  case Hexagon::V6_vd0:
-  case Hexagon::V6_vd0_128B: {
+  case Hexagon::V6_vd0: {
     MCInst TmpInst;
     assert(Inst.getOperand(0).isReg() &&
            "Expected register and none was found");
@@ -626,13 +623,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vL32b_nt_pi:
   case Hexagon::V6_vL32b_nt_tmp_pi:
   case Hexagon::V6_vL32b_tmp_pi:
-  case Hexagon::V6_vL32Ub_pi_128B:
-  case Hexagon::V6_vL32b_cur_pi_128B:
-  case Hexagon::V6_vL32b_nt_cur_pi_128B:
-  case Hexagon::V6_vL32b_pi_128B:
-  case Hexagon::V6_vL32b_nt_pi_128B:
-  case Hexagon::V6_vL32b_nt_tmp_pi_128B:
-  case Hexagon::V6_vL32b_tmp_pi_128B:
     MappedInst = ScaleVectorOffset(Inst, 3, VectorSize, OutContext);
     return;
 
@@ -643,13 +633,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vL32b_nt_cur_ai:
   case Hexagon::V6_vL32b_nt_tmp_ai:
   case Hexagon::V6_vL32b_tmp_ai:
-  case Hexagon::V6_vL32Ub_ai_128B:
-  case Hexagon::V6_vL32b_ai_128B:
-  case Hexagon::V6_vL32b_cur_ai_128B:
-  case Hexagon::V6_vL32b_nt_ai_128B:
-  case Hexagon::V6_vL32b_nt_cur_ai_128B:
-  case Hexagon::V6_vL32b_nt_tmp_ai_128B:
-  case Hexagon::V6_vL32b_tmp_ai_128B:
     MappedInst = ScaleVectorOffset(Inst, 2, VectorSize, OutContext);
     return;
 
@@ -658,11 +641,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vS32b_nt_new_pi:
   case Hexagon::V6_vS32b_nt_pi:
   case Hexagon::V6_vS32b_pi:
-  case Hexagon::V6_vS32Ub_pi_128B:
-  case Hexagon::V6_vS32b_new_pi_128B:
-  case Hexagon::V6_vS32b_nt_new_pi_128B:
-  case Hexagon::V6_vS32b_nt_pi_128B:
-  case Hexagon::V6_vS32b_pi_128B:
     MappedInst = ScaleVectorOffset(Inst, 2, VectorSize, OutContext);
     return;
 
@@ -671,11 +649,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vS32b_new_ai:
   case Hexagon::V6_vS32b_nt_ai:
   case Hexagon::V6_vS32b_nt_new_ai:
-  case Hexagon::V6_vS32Ub_ai_128B:
-  case Hexagon::V6_vS32b_ai_128B:
-  case Hexagon::V6_vS32b_new_ai_128B:
-  case Hexagon::V6_vS32b_nt_ai_128B:
-  case Hexagon::V6_vS32b_nt_new_ai_128B:
     MappedInst = ScaleVectorOffset(Inst, 1, VectorSize, OutContext);
     return;
 
@@ -691,18 +664,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vL32b_pred_pi:
   case Hexagon::V6_vL32b_tmp_npred_pi:
   case Hexagon::V6_vL32b_tmp_pred_pi:
-  case Hexagon::V6_vL32b_cur_npred_pi_128B:
-  case Hexagon::V6_vL32b_cur_pred_pi_128B:
-  case Hexagon::V6_vL32b_npred_pi_128B:
-  case Hexagon::V6_vL32b_nt_cur_npred_pi_128B:
-  case Hexagon::V6_vL32b_nt_cur_pred_pi_128B:
-  case Hexagon::V6_vL32b_nt_npred_pi_128B:
-  case Hexagon::V6_vL32b_nt_pred_pi_128B:
-  case Hexagon::V6_vL32b_nt_tmp_npred_pi_128B:
-  case Hexagon::V6_vL32b_nt_tmp_pred_pi_128B:
-  case Hexagon::V6_vL32b_pred_pi_128B:
-  case Hexagon::V6_vL32b_tmp_npred_pi_128B:
-  case Hexagon::V6_vL32b_tmp_pred_pi_128B:
     MappedInst = ScaleVectorOffset(Inst, 4, VectorSize, OutContext);
     return;
 
@@ -718,18 +679,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vL32b_pred_ai:
   case Hexagon::V6_vL32b_tmp_npred_ai:
   case Hexagon::V6_vL32b_tmp_pred_ai:
-  case Hexagon::V6_vL32b_cur_npred_ai_128B:
-  case Hexagon::V6_vL32b_cur_pred_ai_128B:
-  case Hexagon::V6_vL32b_npred_ai_128B:
-  case Hexagon::V6_vL32b_nt_cur_npred_ai_128B:
-  case Hexagon::V6_vL32b_nt_cur_pred_ai_128B:
-  case Hexagon::V6_vL32b_nt_npred_ai_128B:
-  case Hexagon::V6_vL32b_nt_pred_ai_128B:
-  case Hexagon::V6_vL32b_nt_tmp_npred_ai_128B:
-  case Hexagon::V6_vL32b_nt_tmp_pred_ai_128B:
-  case Hexagon::V6_vL32b_pred_ai_128B:
-  case Hexagon::V6_vL32b_tmp_npred_ai_128B:
-  case Hexagon::V6_vL32b_tmp_pred_ai_128B:
     MappedInst = ScaleVectorOffset(Inst, 3, VectorSize, OutContext);
     return;
 
@@ -747,20 +696,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vS32b_nt_qpred_pi:
   case Hexagon::V6_vS32b_pred_pi:
   case Hexagon::V6_vS32b_qpred_pi:
-  case Hexagon::V6_vS32Ub_npred_pi_128B:
-  case Hexagon::V6_vS32Ub_pred_pi_128B:
-  case Hexagon::V6_vS32b_new_npred_pi_128B:
-  case Hexagon::V6_vS32b_new_pred_pi_128B:
-  case Hexagon::V6_vS32b_npred_pi_128B:
-  case Hexagon::V6_vS32b_nqpred_pi_128B:
-  case Hexagon::V6_vS32b_nt_new_npred_pi_128B:
-  case Hexagon::V6_vS32b_nt_new_pred_pi_128B:
-  case Hexagon::V6_vS32b_nt_npred_pi_128B:
-  case Hexagon::V6_vS32b_nt_nqpred_pi_128B:
-  case Hexagon::V6_vS32b_nt_pred_pi_128B:
-  case Hexagon::V6_vS32b_nt_qpred_pi_128B:
-  case Hexagon::V6_vS32b_pred_pi_128B:
-  case Hexagon::V6_vS32b_qpred_pi_128B:
     MappedInst = ScaleVectorOffset(Inst, 3, VectorSize, OutContext);
     return;
 
@@ -778,20 +713,6 @@ void HexagonAsmPrinter::HexagonProcessInstruction(MCInst &Inst,
   case Hexagon::V6_vS32b_nt_qpred_ai:
   case Hexagon::V6_vS32b_pred_ai:
   case Hexagon::V6_vS32b_qpred_ai:
-  case Hexagon::V6_vS32Ub_npred_ai_128B:
-  case Hexagon::V6_vS32Ub_pred_ai_128B:
-  case Hexagon::V6_vS32b_new_npred_ai_128B:
-  case Hexagon::V6_vS32b_new_pred_ai_128B:
-  case Hexagon::V6_vS32b_npred_ai_128B:
-  case Hexagon::V6_vS32b_nqpred_ai_128B:
-  case Hexagon::V6_vS32b_nt_new_npred_ai_128B:
-  case Hexagon::V6_vS32b_nt_new_pred_ai_128B:
-  case Hexagon::V6_vS32b_nt_npred_ai_128B:
-  case Hexagon::V6_vS32b_nt_nqpred_ai_128B:
-  case Hexagon::V6_vS32b_nt_pred_ai_128B:
-  case Hexagon::V6_vS32b_nt_qpred_ai_128B:
-  case Hexagon::V6_vS32b_pred_ai_128B:
-  case Hexagon::V6_vS32b_qpred_ai_128B:
     MappedInst = ScaleVectorOffset(Inst, 2, VectorSize, OutContext);
     return;
   }
@@ -420,8 +420,7 @@ bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
 
   switch (RC->getID()) {
     case Hexagon::DoubleRegsRegClassID:
-    case Hexagon::VecDblRegsRegClassID:
-    case Hexagon::VecDblRegs128BRegClassID:
+    case Hexagon::HvxWRRegClassID:
       Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
       if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
         Begin = Width;
@@ -918,12 +917,9 @@ const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
     case Hexagon::DoubleRegsRegClassID:
       VerifySR(RC, RR.Sub);
       return &Hexagon::IntRegsRegClass;
-    case Hexagon::VecDblRegsRegClassID:
+    case Hexagon::HvxWRRegClassID:
       VerifySR(RC, RR.Sub);
-      return &Hexagon::VectorRegsRegClass;
-    case Hexagon::VecDblRegs128BRegClassID:
-      VerifySR(RC, RR.Sub);
-      return &Hexagon::VectorRegs128BRegClass;
+      return &Hexagon::HvxVRRegClass;
   }
   return nullptr;
 }
@@ -1627,8 +1623,7 @@ bool CopyGeneration::processBlock(MachineBasicBlock &B,
   }
 
   if (FRC == &Hexagon::DoubleRegsRegClass ||
-      FRC == &Hexagon::VecDblRegsRegClass ||
-      FRC == &Hexagon::VecDblRegs128BRegClass) {
+      FRC == &Hexagon::HvxWRRegClass) {
     // Try to generate REG_SEQUENCE.
     unsigned SubLo = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_lo);
     unsigned SubHi = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_hi);
@@ -1665,7 +1660,6 @@ bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
     case Hexagon::A2_tfrp:
     case Hexagon::A2_combinew:
     case Hexagon::V6_vcombine:
-    case Hexagon::V6_vcombine_128B:
       return NoConv;
     default:
       break;
@@ -1704,8 +1698,7 @@ bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
       break;
     }
     case Hexagon::A2_combinew:
-    case Hexagon::V6_vcombine:
-    case Hexagon::V6_vcombine_128B: {
+    case Hexagon::V6_vcombine: {
       const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg);
       unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
       unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
@@ -102,8 +102,7 @@ BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
   bool IsSubLo = (Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
   switch (ID) {
     case DoubleRegsRegClassID:
-    case VecDblRegsRegClassID:
-    case VecDblRegs128BRegClassID:
+    case HvxWRRegClassID:
       return IsSubLo ? BT::BitMask(0, RW-1)
                      : BT::BitMask(RW, 2*RW-1);
     default:
@@ -703,7 +702,6 @@ bool HexagonEvaluator::evaluate(const MachineInstr &MI,
     case A4_combineri:
     case A2_combinew:
     case V6_vcombine:
-    case V6_vcombine_128B:
       assert(W0 % 2 == 0);
       return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
     case A2_combine_ll:
@@ -161,7 +161,6 @@ static bool isCombinableInstType(MachineInstr &MI, const HexagonInstrInfo *TII,
   }
 
   case Hexagon::V6_vassign:
-  case Hexagon::V6_vassign_128B:
     return true;
 
   default:
@@ -231,8 +230,7 @@ static bool isEvenReg(unsigned Reg) {
   assert(TargetRegisterInfo::isPhysicalRegister(Reg));
   if (Hexagon::IntRegsRegClass.contains(Reg))
     return (Reg - Hexagon::R0) % 2 == 0;
-  if (Hexagon::VectorRegsRegClass.contains(Reg) ||
-      Hexagon::VectorRegs128BRegClass.contains(Reg))
+  if (Hexagon::HvxVRRegClass.contains(Reg))
     return (Reg - Hexagon::V0) % 2 == 0;
   llvm_unreachable("Invalid register");
 }
@@ -593,12 +591,9 @@ void HexagonCopyToCombine::combine(MachineInstr &I1, MachineInstr &I2,
   if (Hexagon::IntRegsRegClass.contains(LoRegDef)) {
     SuperRC = &Hexagon::DoubleRegsRegClass;
     SubLo = Hexagon::isub_lo;
-  } else if (Hexagon::VectorRegsRegClass.contains(LoRegDef)) {
+  } else if (Hexagon::HvxVRRegClass.contains(LoRegDef)) {
     assert(ST->useHVXOps());
-    if (ST->useHVXSglOps())
-      SuperRC = &Hexagon::VecDblRegsRegClass;
-    else
-      SuperRC = &Hexagon::VecDblRegs128BRegClass;
+    SuperRC = &Hexagon::HvxWRRegClass;
     SubLo = Hexagon::vsub_lo;
   } else
     llvm_unreachable("Unexpected register class");
@@ -875,12 +870,9 @@ void HexagonCopyToCombine::emitCombineRR(MachineBasicBlock::iterator &InsertPt,
   unsigned NewOpc;
   if (Hexagon::DoubleRegsRegClass.contains(DoubleDestReg)) {
     NewOpc = Hexagon::A2_combinew;
-  } else if (Hexagon::VecDblRegsRegClass.contains(DoubleDestReg)) {
+  } else if (Hexagon::HvxWRRegClass.contains(DoubleDestReg)) {
     assert(ST->useHVXOps());
-    if (ST->useHVXSglOps())
-      NewOpc = Hexagon::V6_vcombine;
-    else
-      NewOpc = Hexagon::V6_vcombine_128B;
+    NewOpc = Hexagon::V6_vcombine;
   } else
     llvm_unreachable("Unexpected register");
 
File diff suppressed because it is too large
@@ -140,515 +140,263 @@ def S4_storeirif_zomapAlias : InstAlias<"if (!$Pv4) memw($Rs32)=#$II", (S4_store
def S4_storeirifnew_zomapAlias : InstAlias<"if (!$Pv4.new) memw($Rs32)=#$II", (S4_storeirifnew_io PredRegs:$Pv4, IntRegs:$Rs32, 0, s32_0Imm:$II)>;
def S4_storeirit_zomapAlias : InstAlias<"if ($Pv4) memw($Rs32)=#$II", (S4_storeirit_io PredRegs:$Pv4, IntRegs:$Rs32, 0, s32_0Imm:$II)>;
def S4_storeiritnew_zomapAlias : InstAlias<"if ($Pv4.new) memw($Rs32)=#$II", (S4_storeiritnew_io PredRegs:$Pv4, IntRegs:$Rs32, 0, s32_0Imm:$II)>;
def V6_MAP_equbAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_128BAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_and_128BAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_ior_128BAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_xor_128BAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equhAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_128BAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_and_128BAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_ior_128BAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_xor_128BAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equwAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_128BAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw VecPredRegs:$Qd4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_and_128BAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_and VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_ior_128BAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_or VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_xor_128BAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_xor VecPredRegs:$Qx4, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_extractw_altAlias : InstAlias<"$Rd32.w=vextract($Vu32,$Rs32)", (V6_extractw IntRegs:$Rd32, VectorRegs:$Vu32, IntRegs:$Rs32)>, Requires<[UseHVX]>;
def V6_extractw_alt_128BAlias : InstAlias<"$Rd32.w=vextract($Vu32,$Rs32)", (V6_extractw IntRegs:$Rd32, VectorRegs:$Vu32, IntRegs:$Rs32)>, Requires<[UseHVX]>;
def V6_ld0Alias : InstAlias<"$Vd32=vmem($Rt32)", (V6_vL32b_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ld0_128BAlias : InstAlias<"$Vd32=vmem($Rt32)", (V6_vL32b_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldnt0Alias : InstAlias<"$Vd32=vmem($Rt32):nt", (V6_vL32b_nt_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldnt0_128BAlias : InstAlias<"$Vd32=vmem($Rt32):nt", (V6_vL32b_nt_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldu0Alias : InstAlias<"$Vd32=vmemu($Rt32)", (V6_vL32Ub_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldu0_128BAlias : InstAlias<"$Vd32=vmemu($Rt32)", (V6_vL32Ub_ai VectorRegs:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_st0Alias : InstAlias<"vmem($Rt32)=$Vs32", (V6_vS32b_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_st0_128BAlias : InstAlias<"vmem($Rt32)=$Vs32", (V6_vS32b_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stn0Alias : InstAlias<"vmem($Rt32)=$Os8.new", (V6_vS32b_new_ai IntRegs:$Rt32, 0, VectorRegs:$Os8)>, Requires<[UseHVX]>;
def V6_stn0_128BAlias : InstAlias<"vmem($Rt32)=$Os8.new", (V6_vS32b_new_ai IntRegs:$Rt32, 0, VectorRegs:$Os8)>, Requires<[UseHVX]>;
def V6_stnnt0Alias : InstAlias<"vmem($Rt32):nt=$Os8.new", (V6_vS32b_nt_new_ai IntRegs:$Rt32, 0, VectorRegs:$Os8)>, Requires<[UseHVX]>;
def V6_stnnt0_128BAlias : InstAlias<"vmem($Rt32):nt=$Os8.new", (V6_vS32b_nt_new_ai IntRegs:$Rt32, 0, VectorRegs:$Os8)>, Requires<[UseHVX]>;
def V6_stnp0Alias : InstAlias<"if (!$Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnp0_128BAlias : InstAlias<"if (!$Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnpnt0Alias : InstAlias<"if (!$Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnpnt0_128BAlias : InstAlias<"if (!$Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnq0Alias : InstAlias<"if (!$Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_nqpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnq0_128BAlias : InstAlias<"if (!$Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_nqpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnqnt0Alias : InstAlias<"if (!$Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_nqpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnqnt0_128BAlias : InstAlias<"if (!$Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_nqpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnt0Alias : InstAlias<"vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stnt0_128BAlias : InstAlias<"vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stp0Alias : InstAlias<"if ($Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stp0_128BAlias : InstAlias<"if ($Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stpnt0Alias : InstAlias<"if ($Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stpnt0_128BAlias : InstAlias<"if ($Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stq0Alias : InstAlias<"if ($Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_qpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stq0_128BAlias : InstAlias<"if ($Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_qpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stqnt0Alias : InstAlias<"if ($Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_qpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stqnt0_128BAlias : InstAlias<"if ($Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_qpred_ai VecPredRegs:$Qv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stu0Alias : InstAlias<"vmemu($Rt32)=$Vs32", (V6_vS32Ub_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stu0_128BAlias : InstAlias<"vmemu($Rt32)=$Vs32", (V6_vS32Ub_ai IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stunp0Alias : InstAlias<"if (!$Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stunp0_128BAlias : InstAlias<"if (!$Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stup0Alias : InstAlias<"if ($Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_stup0_128BAlias : InstAlias<"if ($Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, VectorRegs:$Vs32)>, Requires<[UseHVX]>;
def V6_vabsdiffh_altAlias : InstAlias<"$Vd32=vabsdiffh($Vu32,$Vv32)", (V6_vabsdiffh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffh_alt_128BAlias : InstAlias<"$Vd32=vabsdiffh($Vu32,$Vv32)", (V6_vabsdiffh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffub_altAlias : InstAlias<"$Vd32=vabsdiffub($Vu32,$Vv32)", (V6_vabsdiffub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffub_alt_128BAlias : InstAlias<"$Vd32=vabsdiffub($Vu32,$Vv32)", (V6_vabsdiffub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffuh_altAlias : InstAlias<"$Vd32=vabsdiffuh($Vu32,$Vv32)", (V6_vabsdiffuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffuh_alt_128BAlias : InstAlias<"$Vd32=vabsdiffuh($Vu32,$Vv32)", (V6_vabsdiffuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffw_altAlias : InstAlias<"$Vd32=vabsdiffw($Vu32,$Vv32)", (V6_vabsdiffw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffw_alt_128BAlias : InstAlias<"$Vd32=vabsdiffw($Vu32,$Vv32)", (V6_vabsdiffw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsh_altAlias : InstAlias<"$Vd32=vabsh($Vu32)", (V6_vabsh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsh_alt_128BAlias : InstAlias<"$Vd32=vabsh($Vu32)", (V6_vabsh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsh_sat_altAlias : InstAlias<"$Vd32=vabsh($Vu32):sat", (V6_vabsh_sat VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsh_sat_alt_128BAlias : InstAlias<"$Vd32=vabsh($Vu32):sat", (V6_vabsh_sat VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuh_altAlias : InstAlias<"$Vd32.uh=vabs($Vu32.h)", (V6_vabsh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuh_alt_128BAlias : InstAlias<"$Vd32.uh=vabs($Vu32.h)", (V6_vabsh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuw_altAlias : InstAlias<"$Vd32.uw=vabs($Vu32.w)", (V6_vabsw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuw_alt_128BAlias : InstAlias<"$Vd32.uw=vabs($Vu32.w)", (V6_vabsw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_altAlias : InstAlias<"$Vd32=vabsw($Vu32)", (V6_vabsw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_alt_128BAlias : InstAlias<"$Vd32=vabsw($Vu32)", (V6_vabsw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_sat_altAlias : InstAlias<"$Vd32=vabsw($Vu32):sat", (V6_vabsw_sat VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_sat_alt_128BAlias : InstAlias<"$Vd32=vabsw($Vu32):sat", (V6_vabsw_sat VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddb_altAlias : InstAlias<"$Vd32=vaddb($Vu32,$Vv32)", (V6_vaddb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddb_alt_128BAlias : InstAlias<"$Vd32=vaddb($Vu32,$Vv32)", (V6_vaddb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddb_dv_altAlias : InstAlias<"$Vdd32=vaddb($Vuu32,$Vvv32)", (V6_vaddb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddb_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddb($Vuu32,$Vvv32)", (V6_vaddb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddbnq_altAlias : InstAlias<"if (!$Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddbnq_alt_128BAlias : InstAlias<"if (!$Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddbq_altAlias : InstAlias<"if ($Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddbq_alt_128BAlias : InstAlias<"if ($Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddh_altAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32)", (V6_vaddh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddh_alt_128BAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32)", (V6_vaddh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddh_dv_altAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32)", (V6_vaddh_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddh_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32)", (V6_vaddh_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddhnq_altAlias : InstAlias<"if (!$Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhnq_alt_128BAlias : InstAlias<"if (!$Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhq_altAlias : InstAlias<"if ($Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhq_alt_128BAlias : InstAlias<"if ($Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhsat_altAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32):sat", (V6_vaddhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddhsat_alt_128BAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32):sat", (V6_vaddhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddhsat_dv_altAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32):sat", (V6_vaddhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddhsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32):sat", (V6_vaddhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddhw_altAlias : InstAlias<"$Vdd32=vaddh($Vu32,$Vv32)", (V6_vaddhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddhw_alt_128BAlias : InstAlias<"$Vdd32=vaddh($Vu32,$Vv32)", (V6_vaddhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubh_altAlias : InstAlias<"$Vdd32=vaddub($Vu32,$Vv32)", (V6_vaddubh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubh_alt_128BAlias : InstAlias<"$Vdd32=vaddub($Vu32,$Vv32)", (V6_vaddubh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_altAlias : InstAlias<"$Vd32=vaddub($Vu32,$Vv32):sat", (V6_vaddubsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_alt_128BAlias : InstAlias<"$Vd32=vaddub($Vu32,$Vv32):sat", (V6_vaddubsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_dv_altAlias : InstAlias<"$Vdd32=vaddub($Vuu32,$Vvv32):sat", (V6_vaddubsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddub($Vuu32,$Vvv32):sat", (V6_vaddubsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_altAlias : InstAlias<"$Vd32=vadduh($Vu32,$Vv32):sat", (V6_vadduhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_alt_128BAlias : InstAlias<"$Vd32=vadduh($Vu32,$Vv32):sat", (V6_vadduhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_dv_altAlias : InstAlias<"$Vdd32=vadduh($Vuu32,$Vvv32):sat", (V6_vadduhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vadduh($Vuu32,$Vvv32):sat", (V6_vadduhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vadduhw_altAlias : InstAlias<"$Vdd32=vadduh($Vu32,$Vv32)", (V6_vadduhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vadduhw_alt_128BAlias : InstAlias<"$Vdd32=vadduh($Vu32,$Vv32)", (V6_vadduhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddw_altAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32)", (V6_vaddw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddw_alt_128BAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32)", (V6_vaddw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddw_dv_altAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32)", (V6_vaddw_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddw_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32)", (V6_vaddw_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddwnq_altAlias : InstAlias<"if (!$Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwnq_alt_128BAlias : InstAlias<"if (!$Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwq_altAlias : InstAlias<"if ($Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwq_alt_128BAlias : InstAlias<"if ($Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwsat_altAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32):sat", (V6_vaddwsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddwsat_alt_128BAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32):sat", (V6_vaddwsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddwsat_dv_altAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32):sat", (V6_vaddwsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddwsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32):sat", (V6_vaddwsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vandqrt_acc_altAlias : InstAlias<"$Vx32.ub|=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt_acc VectorRegs:$Vx32, VecPredRegs:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandqrt_acc_alt_128BAlias : InstAlias<"$Vx32.ub|=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt_acc VectorRegs:$Vx32, VecPredRegs:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandqrt_altAlias : InstAlias<"$Vd32.ub=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt VectorRegs:$Vd32, VecPredRegs:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandqrt_alt_128BAlias : InstAlias<"$Vd32.ub=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt VectorRegs:$Vd32, VecPredRegs:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_acc_altAlias : InstAlias<"$Qx4.ub|=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt_acc VecPredRegs:$Qx4, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_acc_alt_128BAlias : InstAlias<"$Qx4.ub|=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt_acc VecPredRegs:$Qx4, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_altAlias : InstAlias<"$Qd4.ub=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt VecPredRegs:$Qd4, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_alt_128BAlias : InstAlias<"$Qd4.ub=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt VecPredRegs:$Qd4, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslh_altAlias : InstAlias<"$Vd32=vaslh($Vu32,$Rt32)", (V6_vaslh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslh_alt_128BAlias : InstAlias<"$Vd32=vaslh($Vu32,$Rt32)", (V6_vaslh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslhv_altAlias : InstAlias<"$Vd32=vaslh($Vu32,$Vv32)", (V6_vaslhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaslhv_alt_128BAlias : InstAlias<"$Vd32=vaslh($Vu32,$Vv32)", (V6_vaslhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaslw_acc_altAlias : InstAlias<"$Vx32+=vaslw($Vu32,$Rt32)", (V6_vaslw_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslw_acc_alt_128BAlias : InstAlias<"$Vx32+=vaslw($Vu32,$Rt32)", (V6_vaslw_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslw_altAlias : InstAlias<"$Vd32=vaslw($Vu32,$Rt32)", (V6_vaslw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslw_alt_128BAlias : InstAlias<"$Vd32=vaslw($Vu32,$Rt32)", (V6_vaslw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslwv_altAlias : InstAlias<"$Vd32=vaslw($Vu32,$Vv32)", (V6_vaslwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vaslwv_alt_128BAlias : InstAlias<"$Vd32=vaslw($Vu32,$Vv32)", (V6_vaslwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrh_altAlias : InstAlias<"$Vd32=vasrh($Vu32,$Rt32)", (V6_vasrh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrh_alt_128BAlias : InstAlias<"$Vd32=vasrh($Vu32,$Rt32)", (V6_vasrh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrhbrndsat_altAlias : InstAlias<"$Vd32=vasrhb($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrhbrndsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhubrndsat_altAlias : InstAlias<"$Vd32=vasrhub($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrhubrndsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhubsat_altAlias : InstAlias<"$Vd32=vasrhub($Vu32,$Vv32,$Rt8):sat", (V6_vasrhubsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhv_altAlias : InstAlias<"$Vd32=vasrh($Vu32,$Vv32)", (V6_vasrhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrhv_alt_128BAlias : InstAlias<"$Vd32=vasrh($Vu32,$Vv32)", (V6_vasrhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrw_acc_altAlias : InstAlias<"$Vx32+=vasrw($Vu32,$Rt32)", (V6_vasrw_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrw_acc_alt_128BAlias : InstAlias<"$Vx32+=vasrw($Vu32,$Rt32)", (V6_vasrw_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrw_altAlias : InstAlias<"$Vd32=vasrw($Vu32,$Rt32)", (V6_vasrw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrw_alt_128BAlias : InstAlias<"$Vd32=vasrw($Vu32,$Rt32)", (V6_vasrw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrwh_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8)", (V6_vasrwhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrwhrndsat_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrwhrndsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrwhsat_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8):sat", (V6_vasrwhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrwuhsat_altAlias : InstAlias<"$Vd32=vasrwuh($Vu32,$Vv32,$Rt8):sat", (V6_vasrwuhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrwv_altAlias : InstAlias<"$Vd32=vasrw($Vu32,$Vv32)", (V6_vasrwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrwv_alt_128BAlias : InstAlias<"$Vd32=vasrw($Vu32,$Vv32)", (V6_vasrwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgh_altAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32)", (V6_vavgh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgh_alt_128BAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32)", (V6_vavgh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavghrnd_altAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32):rnd", (V6_vavghrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavghrnd_alt_128BAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32):rnd", (V6_vavghrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgub_altAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32)", (V6_vavgub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgub_alt_128BAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32)", (V6_vavgub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgubrnd_altAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32):rnd", (V6_vavgubrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgubrnd_alt_128BAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32):rnd", (V6_vavgubrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavguh_altAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32)", (V6_vavguh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavguh_alt_128BAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32)", (V6_vavguh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavguhrnd_altAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32):rnd", (V6_vavguhrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavguhrnd_alt_128BAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32):rnd", (V6_vavguhrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgw_altAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32)", (V6_vavgw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgw_alt_128BAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32)", (V6_vavgw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgwrnd_altAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32):rnd", (V6_vavgwrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vavgwrnd_alt_128BAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32):rnd", (V6_vavgwrnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vcl0h_altAlias : InstAlias<"$Vd32=vcl0h($Vu32)", (V6_vcl0h VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vcl0h_alt_128BAlias : InstAlias<"$Vd32=vcl0h($Vu32)", (V6_vcl0h VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vcl0w_altAlias : InstAlias<"$Vd32=vcl0w($Vu32)", (V6_vcl0w VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vcl0w_alt_128BAlias : InstAlias<"$Vd32=vcl0w($Vu32)", (V6_vcl0w VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vd0Alias : InstAlias<"$Vd32=#0", (V6_vxor VectorRegs:$Vd32, VectorRegs:$Vd32, VectorRegs:$Vd32)>, Requires<[UseHVX]>;
def V6_vd0_128BAlias : InstAlias<"$Vd32=#0", (V6_vxor VectorRegs:$Vd32, VectorRegs:$Vd32, VectorRegs:$Vd32)>, Requires<[UseHVX]>;
def V6_vdd0Alias : InstAlias<"$Vdd32=#0", (V6_vsubw_dv VecDblRegs:$Vdd32, W15, W15)>, Requires<[UseHVX]>;
def V6_vdd0_128BAlias : InstAlias<"$Vdd32=#0", (V6_vsubw_dv VecDblRegs:$Vdd32, W15, W15)>, Requires<[UseHVX]>;
def V6_vdealb4w_altAlias : InstAlias<"$Vd32=vdealb4w($Vu32,$Vv32)", (V6_vdealb4w VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdealb4w_alt_128BAlias : InstAlias<"$Vd32=vdealb4w($Vu32,$Vv32)", (V6_vdealb4w VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdealb_altAlias : InstAlias<"$Vd32=vdealb($Vu32)", (V6_vdealb VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vdealb_alt_128BAlias : InstAlias<"$Vd32=vdealb($Vu32)", (V6_vdealb VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vdealh_altAlias : InstAlias<"$Vd32=vdealh($Vu32)", (V6_vdealh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vdealh_alt_128BAlias : InstAlias<"$Vd32=vdealh($Vu32)", (V6_vdealh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vdmpybus_acc_altAlias : InstAlias<"$Vx32+=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_altAlias : InstAlias<"$Vd32=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_alt_128BAlias : InstAlias<"$Vd32=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_dv_acc_altAlias : InstAlias<"$Vxx32+=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_dv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_dv_altAlias : InstAlias<"$Vdd32=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpybus_dv_alt_128BAlias : InstAlias<"$Vdd32=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_acc_altAlias : InstAlias<"$Vx32+=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_altAlias : InstAlias<"$Vd32=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_alt_128BAlias : InstAlias<"$Vd32=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_dv_acc_altAlias : InstAlias<"$Vxx32+=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_dv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_dv_altAlias : InstAlias<"$Vdd32=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhb_dv_alt_128BAlias : InstAlias<"$Vdd32=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhisat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat_acc VectorRegs:$Vx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhisat_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat_acc VectorRegs:$Vx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhisat_altAlias : InstAlias<"$Vd32=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat VectorRegs:$Vd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhisat_alt_128BAlias : InstAlias<"$Vd32=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat VectorRegs:$Vd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsat_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsat_altAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsat_alt_128BAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsuisat_acc_altAlias : InstAlias<"$Vx32+=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat_acc VectorRegs:$Vx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsuisat_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat_acc VectorRegs:$Vx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsuisat_altAlias : InstAlias<"$Vd32=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat VectorRegs:$Vd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsuisat_alt_128BAlias : InstAlias<"$Vd32=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat VectorRegs:$Vd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsusat_acc_altAlias : InstAlias<"$Vx32+=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsusat_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsusat_altAlias : InstAlias<"$Vd32=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhsusat_alt_128BAlias : InstAlias<"$Vd32=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdmpyhvsat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdmpyhvsat_acc_alt_128BAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdmpyhvsat_altAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdmpyhvsat_alt_128BAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vdsaduh_acc_altAlias : InstAlias<"$Vxx32+=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdsaduh_acc_alt_128BAlias : InstAlias<"$Vxx32+=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdsaduh_altAlias : InstAlias<"$Vdd32=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vdsaduh_alt_128BAlias : InstAlias<"$Vdd32=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vlsrh_altAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Rt32)", (V6_vlsrh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vlsrh_alt_128BAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Rt32)", (V6_vlsrh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vlsrhv_altAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Vv32)", (V6_vlsrhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vlsrhv_alt_128BAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Vv32)", (V6_vlsrhv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vlsrw_altAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Rt32)", (V6_vlsrw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vlsrw_alt_128BAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Rt32)", (V6_vlsrw VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vlsrwv_altAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Vv32)", (V6_vlsrwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vlsrwv_alt_128BAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Vv32)", (V6_vlsrwv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxh_altAlias : InstAlias<"$Vd32=vmaxh($Vu32,$Vv32)", (V6_vmaxh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxh_alt_128BAlias : InstAlias<"$Vd32=vmaxh($Vu32,$Vv32)", (V6_vmaxh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxub_altAlias : InstAlias<"$Vd32=vmaxub($Vu32,$Vv32)", (V6_vmaxub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxub_alt_128BAlias : InstAlias<"$Vd32=vmaxub($Vu32,$Vv32)", (V6_vmaxub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxuh_altAlias : InstAlias<"$Vd32=vmaxuh($Vu32,$Vv32)", (V6_vmaxuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxuh_alt_128BAlias : InstAlias<"$Vd32=vmaxuh($Vu32,$Vv32)", (V6_vmaxuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxw_altAlias : InstAlias<"$Vd32=vmaxw($Vu32,$Vv32)", (V6_vmaxw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmaxw_alt_128BAlias : InstAlias<"$Vd32=vmaxw($Vu32,$Vv32)", (V6_vmaxw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminh_altAlias : InstAlias<"$Vd32=vminh($Vu32,$Vv32)", (V6_vminh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminh_alt_128BAlias : InstAlias<"$Vd32=vminh($Vu32,$Vv32)", (V6_vminh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminub_altAlias : InstAlias<"$Vd32=vminub($Vu32,$Vv32)", (V6_vminub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminub_alt_128BAlias : InstAlias<"$Vd32=vminub($Vu32,$Vv32)", (V6_vminub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminuh_altAlias : InstAlias<"$Vd32=vminuh($Vu32,$Vv32)", (V6_vminuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminuh_alt_128BAlias : InstAlias<"$Vd32=vminuh($Vu32,$Vv32)", (V6_vminuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminw_altAlias : InstAlias<"$Vd32=vminw($Vu32,$Vv32)", (V6_vminw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vminw_alt_128BAlias : InstAlias<"$Vd32=vminw($Vu32,$Vv32)", (V6_vminw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpabus_acc_altAlias : InstAlias<"$Vxx32+=vmpabus($Vuu32,$Rt32)", (V6_vmpabus_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpabus_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpabus($Vuu32,$Rt32)", (V6_vmpabus_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpabus_altAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Rt32)", (V6_vmpabus VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpabus_alt_128BAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Rt32)", (V6_vmpabus VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpabusv_altAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Vvv32)", (V6_vmpabusv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vmpabusv_alt_128BAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Vvv32)", (V6_vmpabusv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vmpabuuv_altAlias : InstAlias<"$Vdd32=vmpabuu($Vuu32,$Vvv32)", (V6_vmpabuuv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vmpabuuv_alt_128BAlias : InstAlias<"$Vdd32=vmpabuu($Vuu32,$Vvv32)", (V6_vmpabuuv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vmpahb_acc_altAlias : InstAlias<"$Vxx32+=vmpahb($Vuu32,$Rt32)", (V6_vmpahb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpahb_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpahb($Vuu32,$Rt32)", (V6_vmpahb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpahb_altAlias : InstAlias<"$Vdd32=vmpahb($Vuu32,$Rt32)", (V6_vmpahb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpahb_alt_128BAlias : InstAlias<"$Vdd32=vmpahb($Vuu32,$Rt32)", (V6_vmpahb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpybus_acc_altAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Rt32)", (V6_vmpybus_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpybus_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Rt32)", (V6_vmpybus_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpybus_altAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Rt32)", (V6_vmpybus VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpybus_alt_128BAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Rt32)", (V6_vmpybus VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpybusv_acc_altAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Vv32)", (V6_vmpybusv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybusv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Vv32)", (V6_vmpybusv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybusv_altAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Vv32)", (V6_vmpybusv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybusv_alt_128BAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Vv32)", (V6_vmpybusv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybv_acc_altAlias : InstAlias<"$Vxx32+=vmpyb($Vu32,$Vv32)", (V6_vmpybv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyb($Vu32,$Vv32)", (V6_vmpybv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybv_altAlias : InstAlias<"$Vdd32=vmpyb($Vu32,$Vv32)", (V6_vmpybv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpybv_alt_128BAlias : InstAlias<"$Vdd32=vmpyb($Vu32,$Vv32)", (V6_vmpybv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyewuh_altAlias : InstAlias<"$Vd32=vmpyewuh($Vu32,$Vv32)", (V6_vmpyewuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyewuh_alt_128BAlias : InstAlias<"$Vd32=vmpyewuh($Vu32,$Vv32)", (V6_vmpyewuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyh_altAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Rt32)", (V6_vmpyh VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyh_alt_128BAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Rt32)", (V6_vmpyh VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhsat_acc_altAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Rt32):sat", (V6_vmpyhsat_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhsat_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Rt32):sat", (V6_vmpyhsat_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhsrs_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:rnd:sat", (V6_vmpyhsrs VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhsrs_alt_128BAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:rnd:sat", (V6_vmpyhsrs VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhss_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:sat", (V6_vmpyhss VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhss_alt_128BAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:sat", (V6_vmpyhss VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyhus_acc_altAlias : InstAlias<"$Vxx32+=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhus_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhus_altAlias : InstAlias<"$Vdd32=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhus_alt_128BAlias : InstAlias<"$Vdd32=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhv_acc_altAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Vv32)", (V6_vmpyhv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Vv32)", (V6_vmpyhv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhv_altAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Vv32)", (V6_vmpyhv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhv_alt_128BAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Vv32)", (V6_vmpyhv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhvsrs_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyhvsrs VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyhvsrs_alt_128BAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyhvsrs VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewh_acc_altAlias : InstAlias<"$Vx32+=vmpyiewh($Vu32,$Vv32)", (V6_vmpyiewh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewh_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyiewh($Vu32,$Vv32)", (V6_vmpyiewh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewuh_acc_altAlias : InstAlias<"$Vx32+=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewuh_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewuh_altAlias : InstAlias<"$Vd32=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiewuh_alt_128BAlias : InstAlias<"$Vd32=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyih_acc_altAlias : InstAlias<"$Vx32+=vmpyih($Vu32,$Vv32)", (V6_vmpyih_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyih_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyih($Vu32,$Vv32)", (V6_vmpyih_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyih_altAlias : InstAlias<"$Vd32=vmpyih($Vu32,$Vv32)", (V6_vmpyih VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyih_alt_128BAlias : InstAlias<"$Vd32=vmpyih($Vu32,$Vv32)", (V6_vmpyih VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyihb_acc_altAlias : InstAlias<"$Vx32+=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyihb_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyihb_altAlias : InstAlias<"$Vd32=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyihb_alt_128BAlias : InstAlias<"$Vd32=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiowh_altAlias : InstAlias<"$Vd32=vmpyiowh($Vu32,$Vv32)", (V6_vmpyiowh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiowh_alt_128BAlias : InstAlias<"$Vd32=vmpyiowh($Vu32,$Vv32)", (V6_vmpyiowh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyiwb_acc_altAlias : InstAlias<"$Vx32+=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwb_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwb_altAlias : InstAlias<"$Vd32=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwb_alt_128BAlias : InstAlias<"$Vd32=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwh_acc_altAlias : InstAlias<"$Vx32+=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwh_acc_alt_128BAlias : InstAlias<"$Vx32+=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwh_altAlias : InstAlias<"$Vd32=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyiwh_alt_128BAlias : InstAlias<"$Vd32=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyowh_altAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:sat", (V6_vmpyowh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyowh_alt_128BAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:sat", (V6_vmpyowh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyowh_rnd_altAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyowh_rnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyowh_rnd_alt_128BAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyowh_rnd VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyub_acc_altAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Rt32)", (V6_vmpyub_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyub_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Rt32)", (V6_vmpyub_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyub_altAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Rt32)", (V6_vmpyub VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyub_alt_128BAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Rt32)", (V6_vmpyub VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyubv_acc_altAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Vv32)", (V6_vmpyubv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyubv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Vv32)", (V6_vmpyubv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyubv_altAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Vv32)", (V6_vmpyubv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyubv_alt_128BAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Vv32)", (V6_vmpyubv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyuh_acc_altAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyuh_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyuh_altAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyuh_alt_128BAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh VecDblRegs:$Vdd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vmpyuhv_acc_altAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyuhv_acc_alt_128BAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv_acc VecDblRegs:$Vxx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyuhv_altAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vmpyuhv_alt_128BAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgh_altAlias : InstAlias<"$Vd32=vnavgh($Vu32,$Vv32)", (V6_vnavgh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgh_alt_128BAlias : InstAlias<"$Vd32=vnavgh($Vu32,$Vv32)", (V6_vnavgh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgub_altAlias : InstAlias<"$Vd32=vnavgub($Vu32,$Vv32)", (V6_vnavgub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgub_alt_128BAlias : InstAlias<"$Vd32=vnavgub($Vu32,$Vv32)", (V6_vnavgub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgw_altAlias : InstAlias<"$Vd32=vnavgw($Vu32,$Vv32)", (V6_vnavgw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnavgw_alt_128BAlias : InstAlias<"$Vd32=vnavgw($Vu32,$Vv32)", (V6_vnavgw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vnormamth_altAlias : InstAlias<"$Vd32=vnormamth($Vu32)", (V6_vnormamth VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vnormamth_alt_128BAlias : InstAlias<"$Vd32=vnormamth($Vu32)", (V6_vnormamth VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vnormamtw_altAlias : InstAlias<"$Vd32=vnormamtw($Vu32)", (V6_vnormamtw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vnormamtw_alt_128BAlias : InstAlias<"$Vd32=vnormamtw($Vu32)", (V6_vnormamtw VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vpackeb_altAlias : InstAlias<"$Vd32=vpackeb($Vu32,$Vv32)", (V6_vpackeb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackeb_alt_128BAlias : InstAlias<"$Vd32=vpackeb($Vu32,$Vv32)", (V6_vpackeb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackeh_altAlias : InstAlias<"$Vd32=vpackeh($Vu32,$Vv32)", (V6_vpackeh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackeh_alt_128BAlias : InstAlias<"$Vd32=vpackeh($Vu32,$Vv32)", (V6_vpackeh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackhb_sat_altAlias : InstAlias<"$Vd32=vpackhb($Vu32,$Vv32):sat", (V6_vpackhb_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackhb_sat_alt_128BAlias : InstAlias<"$Vd32=vpackhb($Vu32,$Vv32):sat", (V6_vpackhb_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackhub_sat_altAlias : InstAlias<"$Vd32=vpackhub($Vu32,$Vv32):sat", (V6_vpackhub_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackhub_sat_alt_128BAlias : InstAlias<"$Vd32=vpackhub($Vu32,$Vv32):sat", (V6_vpackhub_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackob_altAlias : InstAlias<"$Vd32=vpackob($Vu32,$Vv32)", (V6_vpackob VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackob_alt_128BAlias : InstAlias<"$Vd32=vpackob($Vu32,$Vv32)", (V6_vpackob VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackoh_altAlias : InstAlias<"$Vd32=vpackoh($Vu32,$Vv32)", (V6_vpackoh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackoh_alt_128BAlias : InstAlias<"$Vd32=vpackoh($Vu32,$Vv32)", (V6_vpackoh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackwh_sat_altAlias : InstAlias<"$Vd32=vpackwh($Vu32,$Vv32):sat", (V6_vpackwh_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackwh_sat_alt_128BAlias : InstAlias<"$Vd32=vpackwh($Vu32,$Vv32):sat", (V6_vpackwh_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackwuh_sat_altAlias : InstAlias<"$Vd32=vpackwuh($Vu32,$Vv32):sat", (V6_vpackwuh_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpackwuh_sat_alt_128BAlias : InstAlias<"$Vd32=vpackwuh($Vu32,$Vv32):sat", (V6_vpackwuh_sat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vpopcounth_altAlias : InstAlias<"$Vd32=vpopcounth($Vu32)", (V6_vpopcounth VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vpopcounth_alt_128BAlias : InstAlias<"$Vd32=vpopcounth($Vu32)", (V6_vpopcounth VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vrmpybus_acc_altAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpybus_acc_alt_128BAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpybus_altAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpybus_alt_128BAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpybusi_acc_altAlias : InstAlias<"$Vxx32+=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpybusi_acc_alt_128BAlias : InstAlias<"$Vxx32+=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpybusi_altAlias : InstAlias<"$Vdd32=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpybusi_alt_128BAlias : InstAlias<"$Vdd32=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpybusv_acc_altAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybusv_acc_alt_128BAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybusv_altAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybusv_alt_128BAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybv_acc_altAlias : InstAlias<"$Vx32+=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybv_acc_alt_128BAlias : InstAlias<"$Vx32+=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybv_altAlias : InstAlias<"$Vd32=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpybv_alt_128BAlias : InstAlias<"$Vd32=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpyub_acc_altAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpyub_acc_alt_128BAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub_acc VectorRegs:$Vx32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpyub_altAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpyub_alt_128BAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub VectorRegs:$Vd32, VectorRegs:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vrmpyubi_acc_altAlias : InstAlias<"$Vxx32+=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpyubi_acc_alt_128BAlias : InstAlias<"$Vxx32+=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpyubi_altAlias : InstAlias<"$Vdd32=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpyubi_alt_128BAlias : InstAlias<"$Vdd32=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrmpyubv_acc_altAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpyubv_acc_alt_128BAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv_acc VectorRegs:$Vx32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpyubv_altAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrmpyubv_alt_128BAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundhb_altAlias : InstAlias<"$Vd32=vroundhb($Vu32,$Vv32):sat", (V6_vroundhb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundhb_alt_128BAlias : InstAlias<"$Vd32=vroundhb($Vu32,$Vv32):sat", (V6_vroundhb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundhub_altAlias : InstAlias<"$Vd32=vroundhub($Vu32,$Vv32):sat", (V6_vroundhub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundhub_alt_128BAlias : InstAlias<"$Vd32=vroundhub($Vu32,$Vv32):sat", (V6_vroundhub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundwh_altAlias : InstAlias<"$Vd32=vroundwh($Vu32,$Vv32):sat", (V6_vroundwh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundwh_alt_128BAlias : InstAlias<"$Vd32=vroundwh($Vu32,$Vv32):sat", (V6_vroundwh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundwuh_altAlias : InstAlias<"$Vd32=vroundwuh($Vu32,$Vv32):sat", (V6_vroundwuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vroundwuh_alt_128BAlias : InstAlias<"$Vd32=vroundwuh($Vu32,$Vv32):sat", (V6_vroundwuh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vrsadubi_acc_altAlias : InstAlias<"$Vxx32+=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrsadubi_acc_alt_128BAlias : InstAlias<"$Vxx32+=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrsadubi_altAlias : InstAlias<"$Vdd32=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vrsadubi_alt_128BAlias : InstAlias<"$Vdd32=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
def V6_vsathub_altAlias : InstAlias<"$Vd32=vsathub($Vu32,$Vv32)", (V6_vsathub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsathub_alt_128BAlias : InstAlias<"$Vd32=vsathub($Vu32,$Vv32)", (V6_vsathub VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsatwh_altAlias : InstAlias<"$Vd32=vsatwh($Vu32,$Vv32)", (V6_vsatwh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsatwh_alt_128BAlias : InstAlias<"$Vd32=vsatwh($Vu32,$Vv32)", (V6_vsatwh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsb_altAlias : InstAlias<"$Vdd32=vsxtb($Vu32)", (V6_vsb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsb_alt_128BAlias : InstAlias<"$Vdd32=vsxtb($Vu32)", (V6_vsb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsh_altAlias : InstAlias<"$Vdd32=vsxth($Vu32)", (V6_vsh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsh_alt_128BAlias : InstAlias<"$Vdd32=vsxth($Vu32)", (V6_vsh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vshufeh_altAlias : InstAlias<"$Vd32=vshuffeh($Vu32,$Vv32)", (V6_vshufeh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufeh_alt_128BAlias : InstAlias<"$Vd32=vshuffeh($Vu32,$Vv32)", (V6_vshufeh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshuffb_altAlias : InstAlias<"$Vd32=vshuffb($Vu32)", (V6_vshuffb VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vshuffb_alt_128BAlias : InstAlias<"$Vd32=vshuffb($Vu32)", (V6_vshuffb VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vshuffeb_altAlias : InstAlias<"$Vd32=vshuffeb($Vu32,$Vv32)", (V6_vshuffeb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshuffeb_alt_128BAlias : InstAlias<"$Vd32=vshuffeb($Vu32,$Vv32)", (V6_vshuffeb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshuffh_altAlias : InstAlias<"$Vd32=vshuffh($Vu32)", (V6_vshuffh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vshuffh_alt_128BAlias : InstAlias<"$Vd32=vshuffh($Vu32)", (V6_vshuffh VectorRegs:$Vd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vshuffob_altAlias : InstAlias<"$Vd32=vshuffob($Vu32,$Vv32)", (V6_vshuffob VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshuffob_alt_128BAlias : InstAlias<"$Vd32=vshuffob($Vu32,$Vv32)", (V6_vshuffob VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoeb_altAlias : InstAlias<"$Vdd32=vshuffoeb($Vu32,$Vv32)", (V6_vshufoeb VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoeb_alt_128BAlias : InstAlias<"$Vdd32=vshuffoeb($Vu32,$Vv32)", (V6_vshufoeb VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoeh_altAlias : InstAlias<"$Vdd32=vshuffoeh($Vu32,$Vv32)", (V6_vshufoeh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoeh_alt_128BAlias : InstAlias<"$Vdd32=vshuffoeh($Vu32,$Vv32)", (V6_vshufoeh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoh_altAlias : InstAlias<"$Vd32=vshuffoh($Vu32,$Vv32)", (V6_vshufoh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vshufoh_alt_128BAlias : InstAlias<"$Vd32=vshuffoh($Vu32,$Vv32)", (V6_vshufoh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubb_altAlias : InstAlias<"$Vd32=vsubb($Vu32,$Vv32)", (V6_vsubb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubb_alt_128BAlias : InstAlias<"$Vd32=vsubb($Vu32,$Vv32)", (V6_vsubb VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubb_dv_altAlias : InstAlias<"$Vdd32=vsubb($Vuu32,$Vvv32)", (V6_vsubb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubb_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubb($Vuu32,$Vvv32)", (V6_vsubb_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubbnq_altAlias : InstAlias<"if (!$Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubbnq_alt_128BAlias : InstAlias<"if (!$Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubbq_altAlias : InstAlias<"if ($Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubbq_alt_128BAlias : InstAlias<"if ($Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubh_altAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32)", (V6_vsubh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubh_alt_128BAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32)", (V6_vsubh VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubh_dv_altAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32)", (V6_vsubh_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubh_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32)", (V6_vsubh_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubhnq_altAlias : InstAlias<"if (!$Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubhnq_alt_128BAlias : InstAlias<"if (!$Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubhq_altAlias : InstAlias<"if ($Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubhq_alt_128BAlias : InstAlias<"if ($Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubhsat_altAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32):sat", (V6_vsubhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubhsat_alt_128BAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32):sat", (V6_vsubhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubhsat_dv_altAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32):sat", (V6_vsubhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubhsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32):sat", (V6_vsubhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubhw_altAlias : InstAlias<"$Vdd32=vsubh($Vu32,$Vv32)", (V6_vsubhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubhw_alt_128BAlias : InstAlias<"$Vdd32=vsubh($Vu32,$Vv32)", (V6_vsubhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsububh_altAlias : InstAlias<"$Vdd32=vsubub($Vu32,$Vv32)", (V6_vsububh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsububh_alt_128BAlias : InstAlias<"$Vdd32=vsubub($Vu32,$Vv32)", (V6_vsububh VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsububsat_altAlias : InstAlias<"$Vd32=vsubub($Vu32,$Vv32):sat", (V6_vsububsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsububsat_alt_128BAlias : InstAlias<"$Vd32=vsubub($Vu32,$Vv32):sat", (V6_vsububsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsububsat_dv_altAlias : InstAlias<"$Vdd32=vsubub($Vuu32,$Vvv32):sat", (V6_vsububsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsububsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubub($Vuu32,$Vvv32):sat", (V6_vsububsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubuhsat_altAlias : InstAlias<"$Vd32=vsubuh($Vu32,$Vv32):sat", (V6_vsubuhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubuhsat_alt_128BAlias : InstAlias<"$Vd32=vsubuh($Vu32,$Vv32):sat", (V6_vsubuhsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubuhsat_dv_altAlias : InstAlias<"$Vdd32=vsubuh($Vuu32,$Vvv32):sat", (V6_vsubuhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubuhsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubuh($Vuu32,$Vvv32):sat", (V6_vsubuhsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubuhw_altAlias : InstAlias<"$Vdd32=vsubuh($Vu32,$Vv32)", (V6_vsubuhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubuhw_alt_128BAlias : InstAlias<"$Vdd32=vsubuh($Vu32,$Vv32)", (V6_vsubuhw VecDblRegs:$Vdd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubw_altAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32)", (V6_vsubw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubw_alt_128BAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32)", (V6_vsubw VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubw_dv_altAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32)", (V6_vsubw_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubw_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32)", (V6_vsubw_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubwnq_altAlias : InstAlias<"if (!$Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubwnq_alt_128BAlias : InstAlias<"if (!$Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwnq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubwq_altAlias : InstAlias<"if ($Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubwq_alt_128BAlias : InstAlias<"if ($Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwq VectorRegs:$Vx32, VecPredRegs:$Qv4, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vsubwsat_altAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32):sat", (V6_vsubwsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubwsat_alt_128BAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32):sat", (V6_vsubwsat VectorRegs:$Vd32, VectorRegs:$Vu32, VectorRegs:$Vv32)>, Requires<[UseHVX]>;
def V6_vsubwsat_dv_altAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32):sat", (V6_vsubwsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vsubwsat_dv_alt_128BAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32):sat", (V6_vsubwsat_dv VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, VecDblRegs:$Vvv32)>, Requires<[UseHVX]>;
def V6_vtmpyb_acc_altAlias : InstAlias<"$Vxx32+=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyb_acc_alt_128BAlias : InstAlias<"$Vxx32+=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyb_altAlias : InstAlias<"$Vdd32=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyb_alt_128BAlias : InstAlias<"$Vdd32=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpybus_acc_altAlias : InstAlias<"$Vxx32+=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpybus_acc_alt_128BAlias : InstAlias<"$Vxx32+=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpybus_altAlias : InstAlias<"$Vdd32=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpybus_alt_128BAlias : InstAlias<"$Vdd32=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyhb_acc_altAlias : InstAlias<"$Vxx32+=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyhb_acc_alt_128BAlias : InstAlias<"$Vxx32+=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb_acc VecDblRegs:$Vxx32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyhb_altAlias : InstAlias<"$Vdd32=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtmpyhb_alt_128BAlias : InstAlias<"$Vdd32=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb VecDblRegs:$Vdd32, VecDblRegs:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtran2x2_mapAlias : InstAlias<"vtrans2x2($Vy32,$Vx32,$Rt32)", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vtran2x2_map_128BAlias : InstAlias<"vtrans2x2($Vy32,$Vx32,$Rt32)", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vunpackb_altAlias : InstAlias<"$Vdd32=vunpackb($Vu32)", (V6_vunpackb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackb_alt_128BAlias : InstAlias<"$Vdd32=vunpackb($Vu32)", (V6_vunpackb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackh_altAlias : InstAlias<"$Vdd32=vunpackh($Vu32)", (V6_vunpackh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackh_alt_128BAlias : InstAlias<"$Vdd32=vunpackh($Vu32)", (V6_vunpackh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackoh_altAlias : InstAlias<"$Vxx32|=vunpackoh($Vu32)", (V6_vunpackoh VecDblRegs:$Vxx32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackoh_alt_128BAlias : InstAlias<"$Vxx32|=vunpackoh($Vu32)", (V6_vunpackoh VecDblRegs:$Vxx32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackub_altAlias : InstAlias<"$Vdd32=vunpackub($Vu32)", (V6_vunpackub VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackub_alt_128BAlias : InstAlias<"$Vdd32=vunpackub($Vu32)", (V6_vunpackub VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackuh_altAlias : InstAlias<"$Vdd32=vunpackuh($Vu32)", (V6_vunpackuh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vunpackuh_alt_128BAlias : InstAlias<"$Vdd32=vunpackuh($Vu32)", (V6_vunpackuh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vzb_altAlias : InstAlias<"$Vdd32=vzxtb($Vu32)", (V6_vzb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vzb_alt_128BAlias : InstAlias<"$Vdd32=vzxtb($Vu32)", (V6_vzb VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vzh_altAlias : InstAlias<"$Vdd32=vzxth($Vu32)", (V6_vzh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_vzh_alt_128BAlias : InstAlias<"$Vdd32=vzxth($Vu32)", (V6_vzh VecDblRegs:$Vdd32, VectorRegs:$Vu32)>, Requires<[UseHVX]>;
def V6_MAP_equbAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb HvxQR:$Qd4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_and HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_or HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equb_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.ub,$Vv32.ub)", (V6_veqb_xor HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equhAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh HvxQR:$Qd4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_and HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_or HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equh_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uh,$Vv32.uh)", (V6_veqh_xor HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equwAlias : InstAlias<"$Qd4=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw HvxQR:$Qd4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_andAlias : InstAlias<"$Qx4&=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_and HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_iorAlias : InstAlias<"$Qx4|=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_or HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_MAP_equw_xorAlias : InstAlias<"$Qx4^=vcmp.eq($Vu32.uw,$Vv32.uw)", (V6_veqw_xor HvxQR:$Qx4, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_extractw_altAlias : InstAlias<"$Rd32.w=vextract($Vu32,$Rs32)", (V6_extractw IntRegs:$Rd32, HvxVR:$Vu32, IntRegs:$Rs32)>, Requires<[UseHVX]>;
def V6_ld0Alias : InstAlias<"$Vd32=vmem($Rt32)", (V6_vL32b_ai HvxVR:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldnt0Alias : InstAlias<"$Vd32=vmem($Rt32):nt", (V6_vL32b_nt_ai HvxVR:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_ldu0Alias : InstAlias<"$Vd32=vmemu($Rt32)", (V6_vL32Ub_ai HvxVR:$Vd32, IntRegs:$Rt32, 0)>, Requires<[UseHVX]>;
def V6_st0Alias : InstAlias<"vmem($Rt32)=$Vs32", (V6_vS32b_ai IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stn0Alias : InstAlias<"vmem($Rt32)=$Os8.new", (V6_vS32b_new_ai IntRegs:$Rt32, 0, HvxVR:$Os8)>, Requires<[UseHVX]>;
def V6_stnnt0Alias : InstAlias<"vmem($Rt32):nt=$Os8.new", (V6_vS32b_nt_new_ai IntRegs:$Rt32, 0, HvxVR:$Os8)>, Requires<[UseHVX]>;
def V6_stnp0Alias : InstAlias<"if (!$Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stnpnt0Alias : InstAlias<"if (!$Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stnq0Alias : InstAlias<"if (!$Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_nqpred_ai HvxQR:$Qv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stnqnt0Alias : InstAlias<"if (!$Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_nqpred_ai HvxQR:$Qv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stnt0Alias : InstAlias<"vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_ai IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stp0Alias : InstAlias<"if ($Pv4) vmem($Rt32)=$Vs32", (V6_vS32b_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stpnt0Alias : InstAlias<"if ($Pv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stq0Alias : InstAlias<"if ($Qv4) vmem($Rt32)=$Vs32", (V6_vS32b_qpred_ai HvxQR:$Qv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stqnt0Alias : InstAlias<"if ($Qv4) vmem($Rt32):nt=$Vs32", (V6_vS32b_nt_qpred_ai HvxQR:$Qv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stu0Alias : InstAlias<"vmemu($Rt32)=$Vs32", (V6_vS32Ub_ai IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stunp0Alias : InstAlias<"if (!$Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_npred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_stup0Alias : InstAlias<"if ($Pv4) vmemu($Rt32)=$Vs32", (V6_vS32Ub_pred_ai PredRegs:$Pv4, IntRegs:$Rt32, 0, HvxVR:$Vs32)>, Requires<[UseHVX]>;
def V6_vabsdiffh_altAlias : InstAlias<"$Vd32=vabsdiffh($Vu32,$Vv32)", (V6_vabsdiffh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffub_altAlias : InstAlias<"$Vd32=vabsdiffub($Vu32,$Vv32)", (V6_vabsdiffub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffuh_altAlias : InstAlias<"$Vd32=vabsdiffuh($Vu32,$Vv32)", (V6_vabsdiffuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsdiffw_altAlias : InstAlias<"$Vd32=vabsdiffw($Vu32,$Vv32)", (V6_vabsdiffw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vabsh_altAlias : InstAlias<"$Vd32=vabsh($Vu32)", (V6_vabsh HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsh_sat_altAlias : InstAlias<"$Vd32=vabsh($Vu32):sat", (V6_vabsh_sat HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuh_altAlias : InstAlias<"$Vd32.uh=vabs($Vu32.h)", (V6_vabsh HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsuw_altAlias : InstAlias<"$Vd32.uw=vabs($Vu32.w)", (V6_vabsw HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_altAlias : InstAlias<"$Vd32=vabsw($Vu32)", (V6_vabsw HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vabsw_sat_altAlias : InstAlias<"$Vd32=vabsw($Vu32):sat", (V6_vabsw_sat HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddb_altAlias : InstAlias<"$Vd32=vaddb($Vu32,$Vv32)", (V6_vaddb HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddb_dv_altAlias : InstAlias<"$Vdd32=vaddb($Vuu32,$Vvv32)", (V6_vaddb_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddbnq_altAlias : InstAlias<"if (!$Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddbq_altAlias : InstAlias<"if ($Qv4.b) $Vx32.b+=$Vu32.b", (V6_vaddbq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddh_altAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32)", (V6_vaddh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddh_dv_altAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32)", (V6_vaddh_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddhnq_altAlias : InstAlias<"if (!$Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhq_altAlias : InstAlias<"if ($Qv4.h) $Vx32.h+=$Vu32.h", (V6_vaddhq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddhsat_altAlias : InstAlias<"$Vd32=vaddh($Vu32,$Vv32):sat", (V6_vaddhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddhsat_dv_altAlias : InstAlias<"$Vdd32=vaddh($Vuu32,$Vvv32):sat", (V6_vaddhsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddhw_altAlias : InstAlias<"$Vdd32=vaddh($Vu32,$Vv32)", (V6_vaddhw HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubh_altAlias : InstAlias<"$Vdd32=vaddub($Vu32,$Vv32)", (V6_vaddubh HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_altAlias : InstAlias<"$Vd32=vaddub($Vu32,$Vv32):sat", (V6_vaddubsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddubsat_dv_altAlias : InstAlias<"$Vdd32=vaddub($Vuu32,$Vvv32):sat", (V6_vaddubsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_altAlias : InstAlias<"$Vd32=vadduh($Vu32,$Vv32):sat", (V6_vadduhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vadduhsat_dv_altAlias : InstAlias<"$Vdd32=vadduh($Vuu32,$Vvv32):sat", (V6_vadduhsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vadduhw_altAlias : InstAlias<"$Vdd32=vadduh($Vu32,$Vv32)", (V6_vadduhw HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddw_altAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32)", (V6_vaddw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddw_dv_altAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32)", (V6_vaddw_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vaddwnq_altAlias : InstAlias<"if (!$Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwq_altAlias : InstAlias<"if ($Qv4.w) $Vx32.w+=$Vu32.w", (V6_vaddwq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
def V6_vaddwsat_altAlias : InstAlias<"$Vd32=vaddw($Vu32,$Vv32):sat", (V6_vaddwsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaddwsat_dv_altAlias : InstAlias<"$Vdd32=vaddw($Vuu32,$Vvv32):sat", (V6_vaddwsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
def V6_vandqrt_acc_altAlias : InstAlias<"$Vx32.ub|=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt_acc HvxVR:$Vx32, HvxQR:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandqrt_altAlias : InstAlias<"$Vd32.ub=vand($Qu4.ub,$Rt32.ub)", (V6_vandqrt HvxVR:$Vd32, HvxQR:$Qu4, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_acc_altAlias : InstAlias<"$Qx4.ub|=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt_acc HvxQR:$Qx4, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vandvrt_altAlias : InstAlias<"$Qd4.ub=vand($Vu32.ub,$Rt32.ub)", (V6_vandvrt HvxQR:$Qd4, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslh_altAlias : InstAlias<"$Vd32=vaslh($Vu32,$Rt32)", (V6_vaslh HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslhv_altAlias : InstAlias<"$Vd32=vaslh($Vu32,$Vv32)", (V6_vaslhv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vaslw_acc_altAlias : InstAlias<"$Vx32+=vaslw($Vu32,$Rt32)", (V6_vaslw_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslw_altAlias : InstAlias<"$Vd32=vaslw($Vu32,$Rt32)", (V6_vaslw HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vaslwv_altAlias : InstAlias<"$Vd32=vaslw($Vu32,$Vv32)", (V6_vaslwv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrh_altAlias : InstAlias<"$Vd32=vasrh($Vu32,$Rt32)", (V6_vasrh HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrhbrndsat_altAlias : InstAlias<"$Vd32=vasrhb($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrhbrndsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhubrndsat_altAlias : InstAlias<"$Vd32=vasrhub($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrhubrndsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhubsat_altAlias : InstAlias<"$Vd32=vasrhub($Vu32,$Vv32,$Rt8):sat", (V6_vasrhubsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
def V6_vasrhv_altAlias : InstAlias<"$Vd32=vasrh($Vu32,$Vv32)", (V6_vasrhv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
def V6_vasrw_acc_altAlias : InstAlias<"$Vx32+=vasrw($Vu32,$Rt32)", (V6_vasrw_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrw_altAlias : InstAlias<"$Vd32=vasrw($Vu32,$Rt32)", (V6_vasrw HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
def V6_vasrwh_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8)", (V6_vasrwhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
|
||||
def V6_vasrwhrndsat_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8):rnd:sat", (V6_vasrwhrndsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
|
||||
def V6_vasrwhsat_altAlias : InstAlias<"$Vd32=vasrwh($Vu32,$Vv32,$Rt8):sat", (V6_vasrwhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
|
||||
def V6_vasrwuhsat_altAlias : InstAlias<"$Vd32=vasrwuh($Vu32,$Vv32,$Rt8):sat", (V6_vasrwuhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32, IntRegsLow8:$Rt8)>;
|
||||
def V6_vasrwv_altAlias : InstAlias<"$Vd32=vasrw($Vu32,$Vv32)", (V6_vasrwv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavgh_altAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32)", (V6_vavgh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavghrnd_altAlias : InstAlias<"$Vd32=vavgh($Vu32,$Vv32):rnd", (V6_vavghrnd HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavgub_altAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32)", (V6_vavgub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavgubrnd_altAlias : InstAlias<"$Vd32=vavgub($Vu32,$Vv32):rnd", (V6_vavgubrnd HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavguh_altAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32)", (V6_vavguh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavguhrnd_altAlias : InstAlias<"$Vd32=vavguh($Vu32,$Vv32):rnd", (V6_vavguhrnd HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavgw_altAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32)", (V6_vavgw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vavgwrnd_altAlias : InstAlias<"$Vd32=vavgw($Vu32,$Vv32):rnd", (V6_vavgwrnd HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vcl0h_altAlias : InstAlias<"$Vd32=vcl0h($Vu32)", (V6_vcl0h HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vcl0w_altAlias : InstAlias<"$Vd32=vcl0w($Vu32)", (V6_vcl0w HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vd0Alias : InstAlias<"$Vd32=#0", (V6_vxor HvxVR:$Vd32, HvxVR:$Vd32, HvxVR:$Vd32)>, Requires<[UseHVX]>;
|
||||
def V6_vdd0Alias : InstAlias<"$Vdd32=#0", (V6_vsubw_dv HvxWR:$Vdd32, W15, W15)>, Requires<[UseHVX]>;
|
||||
def V6_vdealb4w_altAlias : InstAlias<"$Vd32=vdealb4w($Vu32,$Vv32)", (V6_vdealb4w HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vdealb_altAlias : InstAlias<"$Vd32=vdealb($Vu32)", (V6_vdealb HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vdealh_altAlias : InstAlias<"$Vd32=vdealh($Vu32)", (V6_vdealh HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpybus_acc_altAlias : InstAlias<"$Vx32+=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpybus_altAlias : InstAlias<"$Vd32=vdmpybus($Vu32,$Rt32)", (V6_vdmpybus HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpybus_dv_acc_altAlias : InstAlias<"$Vxx32+=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpybus_dv_altAlias : InstAlias<"$Vdd32=vdmpybus($Vuu32,$Rt32)", (V6_vdmpybus_dv HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhb_acc_altAlias : InstAlias<"$Vx32+=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhb_altAlias : InstAlias<"$Vd32=vdmpyhb($Vu32,$Rt32)", (V6_vdmpyhb HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhb_dv_acc_altAlias : InstAlias<"$Vxx32+=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhb_dv_altAlias : InstAlias<"$Vdd32=vdmpyhb($Vuu32,$Rt32)", (V6_vdmpyhb_dv HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhisat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat_acc HvxVR:$Vx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhisat_altAlias : InstAlias<"$Vd32=vdmpyh($Vuu32,$Rt32):sat", (V6_vdmpyhisat HvxVR:$Vd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsat_altAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Rt32):sat", (V6_vdmpyhsat HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsuisat_acc_altAlias : InstAlias<"$Vx32+=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat_acc HvxVR:$Vx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsuisat_altAlias : InstAlias<"$Vd32=vdmpyhsu($Vuu32,$Rt32,#1):sat", (V6_vdmpyhsuisat HvxVR:$Vd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsusat_acc_altAlias : InstAlias<"$Vx32+=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhsusat_altAlias : InstAlias<"$Vd32=vdmpyhsu($Vu32,$Rt32):sat", (V6_vdmpyhsusat HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhvsat_acc_altAlias : InstAlias<"$Vx32+=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vdmpyhvsat_altAlias : InstAlias<"$Vd32=vdmpyh($Vu32,$Vv32):sat", (V6_vdmpyhvsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vdsaduh_acc_altAlias : InstAlias<"$Vxx32+=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vdsaduh_altAlias : InstAlias<"$Vdd32=vdsaduh($Vuu32,$Rt32)", (V6_vdsaduh HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vlsrh_altAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Rt32)", (V6_vlsrh HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vlsrhv_altAlias : InstAlias<"$Vd32=vlsrh($Vu32,$Vv32)", (V6_vlsrhv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vlsrw_altAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Rt32)", (V6_vlsrw HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vlsrwv_altAlias : InstAlias<"$Vd32=vlsrw($Vu32,$Vv32)", (V6_vlsrwv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmaxh_altAlias : InstAlias<"$Vd32=vmaxh($Vu32,$Vv32)", (V6_vmaxh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmaxub_altAlias : InstAlias<"$Vd32=vmaxub($Vu32,$Vv32)", (V6_vmaxub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmaxuh_altAlias : InstAlias<"$Vd32=vmaxuh($Vu32,$Vv32)", (V6_vmaxuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmaxw_altAlias : InstAlias<"$Vd32=vmaxw($Vu32,$Vv32)", (V6_vmaxw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vminh_altAlias : InstAlias<"$Vd32=vminh($Vu32,$Vv32)", (V6_vminh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vminub_altAlias : InstAlias<"$Vd32=vminub($Vu32,$Vv32)", (V6_vminub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vminuh_altAlias : InstAlias<"$Vd32=vminuh($Vu32,$Vv32)", (V6_vminuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vminw_altAlias : InstAlias<"$Vd32=vminw($Vu32,$Vv32)", (V6_vminw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpabus_acc_altAlias : InstAlias<"$Vxx32+=vmpabus($Vuu32,$Rt32)", (V6_vmpabus_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpabus_altAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Rt32)", (V6_vmpabus HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpabusv_altAlias : InstAlias<"$Vdd32=vmpabus($Vuu32,$Vvv32)", (V6_vmpabusv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpabuuv_altAlias : InstAlias<"$Vdd32=vmpabuu($Vuu32,$Vvv32)", (V6_vmpabuuv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpahb_acc_altAlias : InstAlias<"$Vxx32+=vmpahb($Vuu32,$Rt32)", (V6_vmpahb_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpahb_altAlias : InstAlias<"$Vdd32=vmpahb($Vuu32,$Rt32)", (V6_vmpahb HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybus_acc_altAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Rt32)", (V6_vmpybus_acc HvxWR:$Vxx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybus_altAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Rt32)", (V6_vmpybus HvxWR:$Vdd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybusv_acc_altAlias : InstAlias<"$Vxx32+=vmpybus($Vu32,$Vv32)", (V6_vmpybusv_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybusv_altAlias : InstAlias<"$Vdd32=vmpybus($Vu32,$Vv32)", (V6_vmpybusv HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybv_acc_altAlias : InstAlias<"$Vxx32+=vmpyb($Vu32,$Vv32)", (V6_vmpybv_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpybv_altAlias : InstAlias<"$Vdd32=vmpyb($Vu32,$Vv32)", (V6_vmpybv HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyewuh_altAlias : InstAlias<"$Vd32=vmpyewuh($Vu32,$Vv32)", (V6_vmpyewuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyh_altAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Rt32)", (V6_vmpyh HvxWR:$Vdd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhsat_acc_altAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Rt32):sat", (V6_vmpyhsat_acc HvxWR:$Vxx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhsrs_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:rnd:sat", (V6_vmpyhsrs HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhss_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Rt32):<<1:sat", (V6_vmpyhss HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhus_acc_altAlias : InstAlias<"$Vxx32+=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhus_altAlias : InstAlias<"$Vdd32=vmpyhus($Vu32,$Vv32)", (V6_vmpyhus HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhv_acc_altAlias : InstAlias<"$Vxx32+=vmpyh($Vu32,$Vv32)", (V6_vmpyhv_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhv_altAlias : InstAlias<"$Vdd32=vmpyh($Vu32,$Vv32)", (V6_vmpyhv HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyhvsrs_altAlias : InstAlias<"$Vd32=vmpyh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyhvsrs HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiewh_acc_altAlias : InstAlias<"$Vx32+=vmpyiewh($Vu32,$Vv32)", (V6_vmpyiewh_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiewuh_acc_altAlias : InstAlias<"$Vx32+=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiewuh_altAlias : InstAlias<"$Vd32=vmpyiewuh($Vu32,$Vv32)", (V6_vmpyiewuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyih_acc_altAlias : InstAlias<"$Vx32+=vmpyih($Vu32,$Vv32)", (V6_vmpyih_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyih_altAlias : InstAlias<"$Vd32=vmpyih($Vu32,$Vv32)", (V6_vmpyih HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyihb_acc_altAlias : InstAlias<"$Vx32+=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyihb_altAlias : InstAlias<"$Vd32=vmpyihb($Vu32,$Rt32)", (V6_vmpyihb HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiowh_altAlias : InstAlias<"$Vd32=vmpyiowh($Vu32,$Vv32)", (V6_vmpyiowh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiwb_acc_altAlias : InstAlias<"$Vx32+=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiwb_altAlias : InstAlias<"$Vd32=vmpyiwb($Vu32,$Rt32)", (V6_vmpyiwb HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiwh_acc_altAlias : InstAlias<"$Vx32+=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyiwh_altAlias : InstAlias<"$Vd32=vmpyiwh($Vu32,$Rt32)", (V6_vmpyiwh HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyowh_altAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:sat", (V6_vmpyowh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyowh_rnd_altAlias : InstAlias<"$Vd32=vmpyowh($Vu32,$Vv32):<<1:rnd:sat", (V6_vmpyowh_rnd HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyub_acc_altAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Rt32)", (V6_vmpyub_acc HvxWR:$Vxx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyub_altAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Rt32)", (V6_vmpyub HvxWR:$Vdd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyubv_acc_altAlias : InstAlias<"$Vxx32+=vmpyub($Vu32,$Vv32)", (V6_vmpyubv_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyubv_altAlias : InstAlias<"$Vdd32=vmpyub($Vu32,$Vv32)", (V6_vmpyubv HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyuh_acc_altAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh_acc HvxWR:$Vxx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyuh_altAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Rt32)", (V6_vmpyuh HvxWR:$Vdd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyuhv_acc_altAlias : InstAlias<"$Vxx32+=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv_acc HvxWR:$Vxx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vmpyuhv_altAlias : InstAlias<"$Vdd32=vmpyuh($Vu32,$Vv32)", (V6_vmpyuhv HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vnavgh_altAlias : InstAlias<"$Vd32=vnavgh($Vu32,$Vv32)", (V6_vnavgh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vnavgub_altAlias : InstAlias<"$Vd32=vnavgub($Vu32,$Vv32)", (V6_vnavgub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vnavgw_altAlias : InstAlias<"$Vd32=vnavgw($Vu32,$Vv32)", (V6_vnavgw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vnormamth_altAlias : InstAlias<"$Vd32=vnormamth($Vu32)", (V6_vnormamth HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vnormamtw_altAlias : InstAlias<"$Vd32=vnormamtw($Vu32)", (V6_vnormamtw HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackeb_altAlias : InstAlias<"$Vd32=vpackeb($Vu32,$Vv32)", (V6_vpackeb HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackeh_altAlias : InstAlias<"$Vd32=vpackeh($Vu32,$Vv32)", (V6_vpackeh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackhb_sat_altAlias : InstAlias<"$Vd32=vpackhb($Vu32,$Vv32):sat", (V6_vpackhb_sat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackhub_sat_altAlias : InstAlias<"$Vd32=vpackhub($Vu32,$Vv32):sat", (V6_vpackhub_sat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackob_altAlias : InstAlias<"$Vd32=vpackob($Vu32,$Vv32)", (V6_vpackob HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackoh_altAlias : InstAlias<"$Vd32=vpackoh($Vu32,$Vv32)", (V6_vpackoh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackwh_sat_altAlias : InstAlias<"$Vd32=vpackwh($Vu32,$Vv32):sat", (V6_vpackwh_sat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpackwuh_sat_altAlias : InstAlias<"$Vd32=vpackwuh($Vu32,$Vv32):sat", (V6_vpackwuh_sat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vpopcounth_altAlias : InstAlias<"$Vd32=vpopcounth($Vu32)", (V6_vpopcounth HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybus_acc_altAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybus_altAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Rt32)", (V6_vrmpybus HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybusi_acc_altAlias : InstAlias<"$Vxx32+=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybusi_altAlias : InstAlias<"$Vdd32=vrmpybus($Vuu32,$Rt32,#$Ii)", (V6_vrmpybusi HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybusv_acc_altAlias : InstAlias<"$Vx32+=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybusv_altAlias : InstAlias<"$Vd32=vrmpybus($Vu32,$Vv32)", (V6_vrmpybusv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybv_acc_altAlias : InstAlias<"$Vx32+=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpybv_altAlias : InstAlias<"$Vd32=vrmpyb($Vu32,$Vv32)", (V6_vrmpybv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyub_acc_altAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub_acc HvxVR:$Vx32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyub_altAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Rt32)", (V6_vrmpyub HvxVR:$Vd32, HvxVR:$Vu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyubi_acc_altAlias : InstAlias<"$Vxx32+=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyubi_altAlias : InstAlias<"$Vdd32=vrmpyub($Vuu32,$Rt32,#$Ii)", (V6_vrmpyubi HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyubv_acc_altAlias : InstAlias<"$Vx32+=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv_acc HvxVR:$Vx32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrmpyubv_altAlias : InstAlias<"$Vd32=vrmpyub($Vu32,$Vv32)", (V6_vrmpyubv HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vroundhb_altAlias : InstAlias<"$Vd32=vroundhb($Vu32,$Vv32):sat", (V6_vroundhb HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vroundhub_altAlias : InstAlias<"$Vd32=vroundhub($Vu32,$Vv32):sat", (V6_vroundhub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vroundwh_altAlias : InstAlias<"$Vd32=vroundwh($Vu32,$Vv32):sat", (V6_vroundwh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vroundwuh_altAlias : InstAlias<"$Vd32=vroundwuh($Vu32,$Vv32):sat", (V6_vroundwuh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vrsadubi_acc_altAlias : InstAlias<"$Vxx32+=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vrsadubi_altAlias : InstAlias<"$Vdd32=vrsadub($Vuu32,$Rt32,#$Ii)", (V6_vrsadubi HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32, u1_0Imm:$Ii)>, Requires<[UseHVX]>;
|
||||
def V6_vsathub_altAlias : InstAlias<"$Vd32=vsathub($Vu32,$Vv32)", (V6_vsathub HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsatwh_altAlias : InstAlias<"$Vd32=vsatwh($Vu32,$Vv32)", (V6_vsatwh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsb_altAlias : InstAlias<"$Vdd32=vsxtb($Vu32)", (V6_vsb HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsh_altAlias : InstAlias<"$Vdd32=vsxth($Vu32)", (V6_vsh HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vshufeh_altAlias : InstAlias<"$Vd32=vshuffeh($Vu32,$Vv32)", (V6_vshufeh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vshuffb_altAlias : InstAlias<"$Vd32=vshuffb($Vu32)", (V6_vshuffb HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vshuffeb_altAlias : InstAlias<"$Vd32=vshuffeb($Vu32,$Vv32)", (V6_vshuffeb HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vshuffh_altAlias : InstAlias<"$Vd32=vshuffh($Vu32)", (V6_vshuffh HvxVR:$Vd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vshuffob_altAlias : InstAlias<"$Vd32=vshuffob($Vu32,$Vv32)", (V6_vshuffob HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vshufoeb_altAlias : InstAlias<"$Vdd32=vshuffoeb($Vu32,$Vv32)", (V6_vshufoeb HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vshufoeh_altAlias : InstAlias<"$Vdd32=vshuffoeh($Vu32,$Vv32)", (V6_vshufoeh HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vshufoh_altAlias : InstAlias<"$Vd32=vshuffoh($Vu32,$Vv32)", (V6_vshufoh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubb_altAlias : InstAlias<"$Vd32=vsubb($Vu32,$Vv32)", (V6_vsubb HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubb_dv_altAlias : InstAlias<"$Vdd32=vsubb($Vuu32,$Vvv32)", (V6_vsubb_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubbnq_altAlias : InstAlias<"if (!$Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubbq_altAlias : InstAlias<"if ($Qv4.b) $Vx32.b-=$Vu32.b", (V6_vsubbq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubh_altAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32)", (V6_vsubh HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubh_dv_altAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32)", (V6_vsubh_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubhnq_altAlias : InstAlias<"if (!$Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubhq_altAlias : InstAlias<"if ($Qv4.h) $Vx32.h-=$Vu32.h", (V6_vsubhq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubhsat_altAlias : InstAlias<"$Vd32=vsubh($Vu32,$Vv32):sat", (V6_vsubhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubhsat_dv_altAlias : InstAlias<"$Vdd32=vsubh($Vuu32,$Vvv32):sat", (V6_vsubhsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubhw_altAlias : InstAlias<"$Vdd32=vsubh($Vu32,$Vv32)", (V6_vsubhw HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsububh_altAlias : InstAlias<"$Vdd32=vsubub($Vu32,$Vv32)", (V6_vsububh HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsububsat_altAlias : InstAlias<"$Vd32=vsubub($Vu32,$Vv32):sat", (V6_vsububsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsububsat_dv_altAlias : InstAlias<"$Vdd32=vsubub($Vuu32,$Vvv32):sat", (V6_vsububsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubuhsat_altAlias : InstAlias<"$Vd32=vsubuh($Vu32,$Vv32):sat", (V6_vsubuhsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubuhsat_dv_altAlias : InstAlias<"$Vdd32=vsubuh($Vuu32,$Vvv32):sat", (V6_vsubuhsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubuhw_altAlias : InstAlias<"$Vdd32=vsubuh($Vu32,$Vv32)", (V6_vsubuhw HvxWR:$Vdd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubw_altAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32)", (V6_vsubw HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubw_dv_altAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32)", (V6_vsubw_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubwnq_altAlias : InstAlias<"if (!$Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwnq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubwq_altAlias : InstAlias<"if ($Qv4.w) $Vx32.w-=$Vu32.w", (V6_vsubwq HvxVR:$Vx32, HvxQR:$Qv4, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubwsat_altAlias : InstAlias<"$Vd32=vsubw($Vu32,$Vv32):sat", (V6_vsubwsat HvxVR:$Vd32, HvxVR:$Vu32, HvxVR:$Vv32)>, Requires<[UseHVX]>;
|
||||
def V6_vsubwsat_dv_altAlias : InstAlias<"$Vdd32=vsubw($Vuu32,$Vvv32):sat", (V6_vsubwsat_dv HvxWR:$Vdd32, HvxWR:$Vuu32, HvxWR:$Vvv32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpyb_acc_altAlias : InstAlias<"$Vxx32+=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpyb_altAlias : InstAlias<"$Vdd32=vtmpyb($Vuu32,$Rt32)", (V6_vtmpyb HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpybus_acc_altAlias : InstAlias<"$Vxx32+=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpybus_altAlias : InstAlias<"$Vdd32=vtmpybus($Vuu32,$Rt32)", (V6_vtmpybus HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpyhb_acc_altAlias : InstAlias<"$Vxx32+=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb_acc HvxWR:$Vxx32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtmpyhb_altAlias : InstAlias<"$Vdd32=vtmpyhb($Vuu32,$Rt32)", (V6_vtmpyhb HvxWR:$Vdd32, HvxWR:$Vuu32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vtran2x2_mapAlias : InstAlias<"vtrans2x2($Vy32,$Vx32,$Rt32)", (V6_vshuff HvxVR:$Vy32, HvxVR:$Vx32, IntRegs:$Rt32)>, Requires<[UseHVX]>;
|
||||
def V6_vunpackb_altAlias : InstAlias<"$Vdd32=vunpackb($Vu32)", (V6_vunpackb HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vunpackh_altAlias : InstAlias<"$Vdd32=vunpackh($Vu32)", (V6_vunpackh HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vunpackoh_altAlias : InstAlias<"$Vxx32|=vunpackoh($Vu32)", (V6_vunpackoh HvxWR:$Vxx32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vunpackub_altAlias : InstAlias<"$Vdd32=vunpackub($Vu32)", (V6_vunpackub HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vunpackuh_altAlias : InstAlias<"$Vdd32=vunpackuh($Vu32)", (V6_vunpackuh HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vzb_altAlias : InstAlias<"$Vdd32=vzxtb($Vu32)", (V6_vzb HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def V6_vzh_altAlias : InstAlias<"$Vdd32=vzxth($Vu32)", (V6_vzh HvxWR:$Vdd32, HvxVR:$Vu32)>, Requires<[UseHVX]>;
|
||||
def Y2_dcfetchAlias : InstAlias<"dcfetch($Rs32)", (Y2_dcfetchbo IntRegs:$Rs32, 0)>;
|
||||
|
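A note on the zero idioms above: V6_vd0Alias maps "$Vd32=#0" onto a vector XOR of a register with itself, so no dedicated zeroing instruction is needed. A minimal C++ sketch of emitting that same idiom from a backend pass, using the BuildMI patterns seen throughout this patch (the helper name emitVecZero is ours, not part of the commit):

// Materialize Vd = #0 as vxor(Vd, Vd), the idiom V6_vd0Alias encodes.
// With the parameterized classes, one opcode covers 64- and 128-byte mode.
static void emitVecZero(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator I, const DebugLoc &DL,
                        const HexagonInstrInfo &HII, unsigned VReg) {
  BuildMI(MBB, I, DL, HII.get(Hexagon::V6_vxor), VReg)
      .addReg(VReg, RegState::Undef)   // reading an undefined input is fine:
      .addReg(VReg, RegState::Undef);  // every bit XORs with itself to zero
}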
@ -389,8 +389,7 @@ bool HexagonEarlyIfConversion::isValidCandidate(const MachineBasicBlock *B)
      continue;
    switch (MRI->getRegClass(R)->getID()) {
      case Hexagon::PredRegsRegClassID:
      case Hexagon::VecPredRegsRegClassID:
      case Hexagon::VecPredRegs128BRegClassID:
      case Hexagon::HvxQRRegClassID:
        break;
      default:
        continue;
@ -771,18 +770,12 @@ unsigned HexagonEarlyIfConversion::buildMux(MachineBasicBlock *B,
    case Hexagon::DoubleRegsRegClassID:
      Opc = Hexagon::PS_pselect;
      break;
    case Hexagon::VectorRegsRegClassID:
    case Hexagon::HvxVRRegClassID:
      Opc = Hexagon::PS_vselect;
      break;
    case Hexagon::VecDblRegsRegClassID:
    case Hexagon::HvxWRRegClassID:
      Opc = Hexagon::PS_wselect;
      break;
    case Hexagon::VectorRegs128BRegClassID:
      Opc = Hexagon::PS_vselect_128B;
      break;
    case Hexagon::VecDblRegs128BRegClassID:
      Opc = Hexagon::PS_wselect_128B;
      break;
    default:
      llvm_unreachable("unexpected register type");
  }
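The buildMux change above is the shape most of this patch takes: two mode-specific switch arms collapse into one keyed on the parameterized class IDs. A sketch of the resulting selection, assuming only the post-patch class IDs shown in the hunk (the helper is ours, for illustration):

// Opcode selection for buildMux after the patch: one arm per parameterized
// register class, no 128B duplicates.
static unsigned selectMuxOpcode(const TargetRegisterClass &RC) {
  switch (RC.getID()) {
  case Hexagon::DoubleRegsRegClassID:
    return Hexagon::PS_pselect;
  case Hexagon::HvxVRRegClassID:    // single HVX vector, either mode
    return Hexagon::PS_vselect;
  case Hexagon::HvxWRRegClassID:    // HVX vector pair, either mode
    return Hexagon::PS_wselect;
  default:
    llvm_unreachable("unexpected register type");
  }
}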
@ -87,6 +87,7 @@
// to be added, and updating the live ranges will be more involved.

#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
@ -400,8 +400,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
    ShrinkCounter++;
  }

  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  MachineDominatorTree MDT;
  MDT.runOnMachineFunction(MF);
@ -498,8 +497,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
/// in one place allows shrink-wrapping of the stack frame.
void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
@ -1603,7 +1601,6 @@ bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  auto &HST = B.getParent()->getSubtarget<HexagonSubtarget>();
  MachineInstr *MI = &*It;
  if (!MI->getOperand(0).isFI())
    return false;
@ -1612,10 +1609,7 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
  unsigned SrcR = MI->getOperand(2).getReg();
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  bool Is128B = HST.useHVXDblOps();
  auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
                     : &Hexagon::VectorRegs128BRegClass;
  auto *RC = &Hexagon::HvxVRRegClass;

  // Insert transfer to general vector register.
  // TmpR0 = A2_tfrsi 0x01010101
@ -1627,8 +1621,7 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);

  unsigned VandOpc = !Is128B ? Hexagon::V6_vandqrt : Hexagon::V6_vandqrt_128B;
  BuildMI(B, It, DL, HII.get(VandOpc), TmpR1)
  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
    .addReg(SrcR, getKillRegState(IsKill))
    .addReg(TmpR0, RegState::Kill);

@ -1645,7 +1638,6 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  auto &HST = B.getParent()->getSubtarget<HexagonSubtarget>();
  MachineInstr *MI = &*It;
  if (!MI->getOperand(1).isFI())
    return false;
@ -1653,10 +1645,7 @@ bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
  DebugLoc DL = MI->getDebugLoc();
  unsigned DstR = MI->getOperand(0).getReg();
  int FI = MI->getOperand(1).getIndex();

  bool Is128B = HST.useHVXDblOps();
  auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
                     : &Hexagon::VectorRegs128BRegClass;
  auto *RC = &Hexagon::HvxVRRegClass;

  // TmpR0 = A2_tfrsi 0x01010101
  // TmpR1 = load FI, 0
@ -1666,12 +1655,12 @@ bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);
  auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
  MachineFunction &MF = *B.getParent();
  auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);

  unsigned VandOpc = !Is128B ? Hexagon::V6_vandvrt : Hexagon::V6_vandvrt_128B;
  BuildMI(B, It, DL, HII.get(VandOpc), DstR)
  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
    .addReg(TmpR1, RegState::Kill)
    .addReg(TmpR0, RegState::Kill);

@ -1685,7 +1674,6 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  MachineInstr *MI = &*It;
@ -1716,21 +1704,15 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  bool Is128B = HST.useHVXDblOps();
  const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
                           : Hexagon::VectorRegs128BRegClass;
  unsigned Size = HRI.getSpillSize(RC);
  unsigned NeedAlign = HRI.getSpillAlignment(RC);
  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned StoreOpc;

  // Store low part.
  if (LPR.contains(SrcLo)) {
    if (NeedAlign <= HasAlign)
      StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
    else
      StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;

    StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                     : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
      .addFrameIndex(FI)
      .addImm(0)
@ -1740,11 +1722,8 @@ bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,

  // Store high part.
  if (LPR.contains(SrcHi)) {
    if (NeedAlign <= MinAlign(HasAlign, Size))
      StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
    else
      StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;

    StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
                                                     : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
      .addFrameIndex(FI)
      .addImm(Size)
@ -1760,7 +1739,6 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  MachineInstr *MI = &*It;
@ -1773,31 +1751,22 @@ bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
  unsigned DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
  int FI = MI->getOperand(1).getIndex();

  bool Is128B = HST.useHVXDblOps();
  const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
                           : Hexagon::VectorRegs128BRegClass;
  unsigned Size = HRI.getSpillSize(RC);
  unsigned NeedAlign = HRI.getSpillAlignment(RC);
  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned LoadOpc;

  // Load low part.
  if (NeedAlign <= HasAlign)
    LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
  else
    LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;

  LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                  : Hexagon::V6_vL32Ub_ai;
  BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
    .addFrameIndex(FI)
    .addImm(0)
    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Load high part.
  if (NeedAlign <= MinAlign(HasAlign, Size))
    LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
  else
    LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;

  LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
                                                  : Hexagon::V6_vL32Ub_ai;
  BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
    .addFrameIndex(FI)
    .addImm(Size)
@ -1811,30 +1780,21 @@ bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &MFI = MF.getFrameInfo();
  MachineInstr *MI = &*It;
  if (!MI->getOperand(0).isFI())
    return false;

  auto &HRI = *HST.getRegisterInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SrcR = MI->getOperand(2).getReg();
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();

  bool Is128B = HST.useHVXDblOps();
  const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
                           : Hexagon::VectorRegs128BRegClass;
  unsigned NeedAlign = HRI.getSpillAlignment(RC);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned StoreOpc;

  if (NeedAlign <= HasAlign)
    StoreOpc = !Is128B ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32b_ai_128B;
  else
    StoreOpc = !Is128B ? Hexagon::V6_vS32Ub_ai : Hexagon::V6_vS32Ub_ai_128B;

  unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
                                            : Hexagon::V6_vS32Ub_ai;
  BuildMI(B, It, DL, HII.get(StoreOpc))
    .addFrameIndex(FI)
    .addImm(0)
@ -1849,29 +1809,20 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &MFI = MF.getFrameInfo();
  MachineInstr *MI = &*It;
  if (!MI->getOperand(1).isFI())
    return false;

  auto &HRI = *HST.getRegisterInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned DstR = MI->getOperand(0).getReg();
  int FI = MI->getOperand(1).getIndex();

  bool Is128B = HST.useHVXDblOps();
  const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
                           : Hexagon::VectorRegs128BRegClass;
  unsigned NeedAlign = HRI.getSpillAlignment(RC);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned LoadOpc;

  if (NeedAlign <= HasAlign)
    LoadOpc = !Is128B ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32b_ai_128B;
  else
    LoadOpc = !Is128B ? Hexagon::V6_vL32Ub_ai : Hexagon::V6_vL32Ub_ai_128B;

  unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
                                           : Hexagon::V6_vL32Ub_ai;
  BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
    .addFrameIndex(FI)
    .addImm(0)
@ -1883,8 +1834,7 @@ bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,

bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
      SmallVectorImpl<unsigned> &NewRegs) const {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed = false;

@ -1909,23 +1859,17 @@ bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
        Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vstorerq_ai:
      case Hexagon::PS_vstorerq_ai_128B:
        Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vloadrq_ai:
      case Hexagon::PS_vloadrq_ai_128B:
        Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vloadrw_ai:
      case Hexagon::PS_vloadrwu_ai:
      case Hexagon::PS_vloadrw_ai_128B:
      case Hexagon::PS_vloadrwu_ai_128B:
        Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
        break;
      case Hexagon::PS_vstorerw_ai:
      case Hexagon::PS_vstorerwu_ai:
      case Hexagon::PS_vstorerw_ai_128B:
      case Hexagon::PS_vstorerwu_ai_128B:
        Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
        break;
    }
@ -1938,8 +1882,7 @@ bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  SavedRegs.resize(HRI.getNumRegs());

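The recurring pattern in the frame-lowering hunks above: spill size and alignment are now queried once from Hexagon::HvxVRRegClass, and the aligned or unaligned opcode is chosen by comparing the class's required alignment against the stack slot's. A compact sketch under those assumptions (the helper name is ours):

// Aligned HVX stores (V6_vS32b_ai) are only safe when the slot alignment
// meets the register class's spill alignment; otherwise fall back to the
// unaligned form (V6_vS32Ub_ai). The load side follows the same rule.
static unsigned selectHvxStoreOpc(unsigned NeedAlign, unsigned HasAlign) {
  return NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
}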
@ -1238,7 +1238,7 @@ bool HexagonHardwareLoops::convertToHardwareLoop(MachineLoop *L,
    // if the immediate fits in the instructions. Otherwise, we need to
    // create a new virtual register.
    int64_t CountImm = TripCount->getImm();
    if (!TII->isValidOffset(LOOP_i, CountImm)) {
    if (!TII->isValidOffset(LOOP_i, CountImm, TRI)) {
      unsigned CountReg = MRI->createVirtualRegister(&Hexagon::IntRegsRegClass);
      BuildMI(*Preheader, InsertPos, DL, TII->get(Hexagon::A2_tfrsi), CountReg)
        .addImm(CountImm);
@ -241,6 +241,10 @@ void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
  case MVT::v32i16:
  case MVT::v16i32:
  case MVT::v8i64:
  case MVT::v128i8:
  case MVT::v64i16:
  case MVT::v32i32:
  case MVT::v16i64:
    if (isAlignedMemNode(LD)) {
      if (LD->isNonTemporal())
        Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
@ -250,23 +254,6 @@ void HexagonDAGToDAGISel::SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl) {
      Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
    }
    break;
  // 128B
  case MVT::v128i8:
  case MVT::v64i16:
  case MVT::v32i32:
  case MVT::v16i64:
    if (isAlignedMemNode(LD)) {
      if (LD->isNonTemporal())
        Opcode = IsValidInc ? Hexagon::V6_vL32b_nt_pi_128B
                            : Hexagon::V6_vL32b_nt_ai_128B;
      else
        Opcode = IsValidInc ? Hexagon::V6_vL32b_pi_128B
                            : Hexagon::V6_vL32b_ai_128B;
    } else {
      Opcode = IsValidInc ? Hexagon::V6_vL32Ub_pi_128B
                          : Hexagon::V6_vL32Ub_ai_128B;
    }
    break;
  default:
    llvm_unreachable("Unexpected memory type in indexed load");
  }
@ -533,11 +520,14 @@ void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
  case MVT::i64:
    Opcode = IsValidInc ? Hexagon::S2_storerd_pi : Hexagon::S2_storerd_io;
    break;
  // 64B
  case MVT::v64i8:
  case MVT::v32i16:
  case MVT::v16i32:
  case MVT::v8i64:
  case MVT::v128i8:
  case MVT::v64i16:
  case MVT::v32i32:
  case MVT::v16i64:
    if (isAlignedMemNode(ST)) {
      if (ST->isNonTemporal())
        Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi : Hexagon::V6_vS32b_nt_ai;
@ -547,23 +537,6 @@ void HexagonDAGToDAGISel::SelectIndexedStore(StoreSDNode *ST, const SDLoc &dl) {
      Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi : Hexagon::V6_vS32Ub_ai;
    }
    break;
  // 128B
  case MVT::v128i8:
  case MVT::v64i16:
  case MVT::v32i32:
  case MVT::v16i64:
    if (isAlignedMemNode(ST)) {
      if (ST->isNonTemporal())
        Opcode = IsValidInc ? Hexagon::V6_vS32b_nt_pi_128B
                            : Hexagon::V6_vS32b_nt_ai_128B;
      else
        Opcode = IsValidInc ? Hexagon::V6_vS32b_pi_128B
                            : Hexagon::V6_vS32b_ai_128B;
    } else {
      Opcode = IsValidInc ? Hexagon::V6_vS32Ub_pi_128B
                          : Hexagon::V6_vS32Ub_ai_128B;
    }
    break;
  default:
    llvm_unreachable("Unexpected memory type in indexed store");
  }
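In the instruction selector above, the 64-byte and 128-byte vector MVTs now share one case list, so the opcode choice reduces to alignment, temporality, and the increment validity alone. A sketch of that decision for indexed loads (the helper is ours; the store side is symmetric with the V6_vS32 opcodes):

// One selection path serves every HVX vector type; no 128B-suffixed twins.
static unsigned selectIndexedHvxLoadOpc(bool Aligned, bool NonTemporal,
                                        bool IsValidInc) {
  if (!Aligned)   // the unaligned path shown above has no non-temporal form
    return IsValidInc ? Hexagon::V6_vL32Ub_pi : Hexagon::V6_vL32Ub_ai;
  if (NonTemporal)
    return IsValidInc ? Hexagon::V6_vL32b_nt_pi : Hexagon::V6_vL32b_nt_ai;
  return IsValidInc ? Hexagon::V6_vL32b_pi : Hexagon::V6_vL32b_ai;
}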
@ -381,7 +381,6 @@ static bool CC_HexagonVector(unsigned ValNo, MVT ValVT,
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }
  // 128B Mode
  if ((UseHVX && UseHVXDbl) &&
      (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 || LocVT == MVT::v128i16 ||
       LocVT == MVT::v256i8)) {
@ -1191,14 +1190,14 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
      } else if ((RegVT == MVT::v8i64 || RegVT == MVT::v16i32 ||
                  RegVT == MVT::v32i16 || RegVT == MVT::v64i8)) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VectorRegsRegClass);
          RegInfo.createVirtualRegister(&Hexagon::HvxVRRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (UseHVX && UseHVXDbl &&
                 ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                   RegVT == MVT::v64i16 || RegVT == MVT::v128i8))) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VectorRegs128BRegClass);
          RegInfo.createVirtualRegister(&Hexagon::HvxVRRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));

@ -1206,20 +1205,20 @@ SDValue HexagonTargetLowering::LowerFormalArguments(
      } else if ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                  RegVT == MVT::v64i16 || RegVT == MVT::v128i8)) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecDblRegsRegClass);
          RegInfo.createVirtualRegister(&Hexagon::HvxWRRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (UseHVX && UseHVXDbl &&
                 ((RegVT == MVT::v32i64 || RegVT == MVT::v64i32 ||
                   RegVT == MVT::v128i16 || RegVT == MVT::v256i8))) {
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecDblRegs128BRegClass);
          RegInfo.createVirtualRegister(&Hexagon::HvxWRRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (RegVT == MVT::v512i1 || RegVT == MVT::v1024i1) {
        assert(0 && "need to support VecPred regs");
        unsigned VReg =
          RegInfo.createVirtualRegister(&Hexagon::VecPredRegsRegClass);
          RegInfo.createVirtualRegister(&Hexagon::HvxQRRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else {
@ -1759,25 +1758,25 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,

  if (Subtarget.hasV60TOps()) {
    if (Subtarget.useHVXSglOps()) {
      addRegisterClass(MVT::v64i8, &Hexagon::VectorRegsRegClass);
      addRegisterClass(MVT::v32i16, &Hexagon::VectorRegsRegClass);
      addRegisterClass(MVT::v16i32, &Hexagon::VectorRegsRegClass);
      addRegisterClass(MVT::v8i64, &Hexagon::VectorRegsRegClass);
      addRegisterClass(MVT::v128i8, &Hexagon::VecDblRegsRegClass);
      addRegisterClass(MVT::v64i16, &Hexagon::VecDblRegsRegClass);
      addRegisterClass(MVT::v32i32, &Hexagon::VecDblRegsRegClass);
      addRegisterClass(MVT::v16i64, &Hexagon::VecDblRegsRegClass);
      addRegisterClass(MVT::v512i1, &Hexagon::VecPredRegsRegClass);
      addRegisterClass(MVT::v64i8, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v8i64, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v128i8, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v64i16, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v16i64, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v512i1, &Hexagon::HvxQRRegClass);
    } else if (Subtarget.useHVXDblOps()) {
      addRegisterClass(MVT::v128i8, &Hexagon::VectorRegs128BRegClass);
      addRegisterClass(MVT::v64i16, &Hexagon::VectorRegs128BRegClass);
      addRegisterClass(MVT::v32i32, &Hexagon::VectorRegs128BRegClass);
      addRegisterClass(MVT::v16i64, &Hexagon::VectorRegs128BRegClass);
      addRegisterClass(MVT::v256i8, &Hexagon::VecDblRegs128BRegClass);
      addRegisterClass(MVT::v128i16, &Hexagon::VecDblRegs128BRegClass);
      addRegisterClass(MVT::v64i32, &Hexagon::VecDblRegs128BRegClass);
      addRegisterClass(MVT::v32i64, &Hexagon::VecDblRegs128BRegClass);
      addRegisterClass(MVT::v1024i1, &Hexagon::VecPredRegs128BRegClass);
      addRegisterClass(MVT::v128i8, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v64i16, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v32i32, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v16i64, &Hexagon::HvxVRRegClass);
      addRegisterClass(MVT::v256i8, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v128i16, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v64i32, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v32i64, &Hexagon::HvxWRRegClass);
      addRegisterClass(MVT::v1024i1, &Hexagon::HvxQRRegClass);
    }
  }

@ -2999,9 +2998,9 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled vector size");
      case 512:
        return std::make_pair(0U, &Hexagon::VecPredRegsRegClass);
        return std::make_pair(0U, &Hexagon::HvxQRRegClass);
      case 1024:
        return std::make_pair(0U, &Hexagon::VecPredRegs128BRegClass);
        return std::make_pair(0U, &Hexagon::HvxQRRegClass);
      }
      break;
    case 'v': // V0-V31
@ -3009,13 +3008,13 @@ HexagonTargetLowering::getRegForInlineAsmConstraint(
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled vector size");
      case 512:
        return std::make_pair(0U, &Hexagon::VectorRegsRegClass);
        return std::make_pair(0U, &Hexagon::HvxVRRegClass);
      case 1024:
        if (Subtarget.hasV60TOps() && UseHVX && UseHVXDbl)
          return std::make_pair(0U, &Hexagon::VectorRegs128BRegClass);
        return std::make_pair(0U, &Hexagon::VecDblRegsRegClass);
          return std::make_pair(0U, &Hexagon::HvxVRRegClass);
        return std::make_pair(0U, &Hexagon::HvxWRRegClass);
      case 2048:
        return std::make_pair(0U, &Hexagon::VecDblRegs128BRegClass);
        return std::make_pair(0U, &Hexagon::HvxWRRegClass);
      }
      break;
    default:
@ -3207,7 +3206,7 @@ HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
  case MVT::v32i16:
  case MVT::v16i32:
  case MVT::v8i64:
    RRC = &Hexagon::VectorRegsRegClass;
    RRC = &Hexagon::HvxVRRegClass;
    break;
  case MVT::v128i8:
  case MVT::v64i16:
@ -3215,15 +3214,15 @@ HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
  case MVT::v16i64:
    if (Subtarget.hasV60TOps() && Subtarget.useHVXOps() &&
        Subtarget.useHVXDblOps())
      RRC = &Hexagon::VectorRegs128BRegClass;
      RRC = &Hexagon::HvxVRRegClass;
    else
      RRC = &Hexagon::VecDblRegsRegClass;
      RRC = &Hexagon::HvxWRRegClass;
    break;
  case MVT::v256i8:
  case MVT::v128i16:
  case MVT::v64i32:
  case MVT::v32i64:
    RRC = &Hexagon::VecDblRegs128BRegClass;
    RRC = &Hexagon::HvxWRRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
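The net effect of the lowering changes above: every HVX value type maps to one of three parameterized classes, and only the set of legal MVTs differs between the two modes. A sketch of that mapping, assuming the vector bit widths visible in the hunks (the helper is ours, for illustration):

static const TargetRegisterClass *hvxRegClassFor(MVT VT, bool Use128B) {
  if (VT == MVT::v512i1 || VT == MVT::v1024i1)
    return &Hexagon::HvxQRRegClass;           // vector predicate
  unsigned VecBits = Use128B ? 1024 : 512;    // bits in one HVX register
  if (VT.getSizeInBits() == VecBits)
    return &Hexagon::HvxVRRegClass;           // single vector
  return &Hexagon::HvxWRRegClass;             // vector pair
}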
@ -31,8 +31,7 @@ def ByteAccess : MemAccessSize<1>;
def HalfWordAccess : MemAccessSize<2>;
def WordAccess : MemAccessSize<3>;
def DoubleWordAccess : MemAccessSize<4>;
def Vector64Access : MemAccessSize<5>;
def Vector128Access : MemAccessSize<6>;
def HVXVectorAccess : MemAccessSize<5>;


//===----------------------------------------------------------------------===//
@ -104,25 +104,12 @@ const int Hexagon_MEMB_OFFSET_MAX = 1023;
|
||||
const int Hexagon_MEMB_OFFSET_MIN = -1024;
|
||||
const int Hexagon_ADDI_OFFSET_MAX = 32767;
|
||||
const int Hexagon_ADDI_OFFSET_MIN = -32768;
|
||||
const int Hexagon_MEMD_AUTOINC_MAX = 56;
|
||||
const int Hexagon_MEMD_AUTOINC_MIN = -64;
|
||||
const int Hexagon_MEMW_AUTOINC_MAX = 28;
|
||||
const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
const int Hexagon_MEMV_AUTOINC_MAX = 192;       // #s3
const int Hexagon_MEMV_AUTOINC_MIN = -256;      // #s3
const int Hexagon_MEMV_AUTOINC_MAX_128B = 384;  // #s3
const int Hexagon_MEMV_AUTOINC_MIN_128B = -512; // #s3

// Pin the vtable to this file.
void HexagonInstrInfo::anchor() {}

HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
    : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
      RI() {}
    : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP) {}

static bool isIntRegForSubInst(unsigned Reg) {
  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
@ -251,18 +238,12 @@ unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
  case Hexagon::L2_loadrd_io:
  case Hexagon::V6_vL32b_ai:
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vL32b_ai_128B:
  case Hexagon::V6_vL32b_nt_ai_128B:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::V6_vL32Ub_ai_128B:
  case Hexagon::LDriw_pred:
  case Hexagon::LDriw_mod:
  case Hexagon::PS_vloadrq_ai:
  case Hexagon::PS_vloadrw_ai:
  case Hexagon::PS_vloadrw_nt_ai:
  case Hexagon::PS_vloadrq_ai_128B:
  case Hexagon::PS_vloadrw_ai_128B:
  case Hexagon::PS_vloadrw_nt_ai_128B: {
  case Hexagon::PS_vloadrw_nt_ai: {
    const MachineOperand OpFI = MI.getOperand(1);
    if (!OpFI.isFI())
      return 0;
@ -306,15 +287,11 @@ unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
  case Hexagon::S2_storeri_io:
  case Hexagon::S2_storerd_io:
  case Hexagon::V6_vS32b_ai:
  case Hexagon::V6_vS32b_ai_128B:
  case Hexagon::V6_vS32Ub_ai:
  case Hexagon::V6_vS32Ub_ai_128B:
  case Hexagon::STriw_pred:
  case Hexagon::STriw_mod:
  case Hexagon::PS_vstorerq_ai:
  case Hexagon::PS_vstorerw_ai:
  case Hexagon::PS_vstorerq_ai_128B:
  case Hexagon::PS_vstorerw_ai_128B: {
  case Hexagon::PS_vstorerw_ai: {
    const MachineOperand &OpFI = MI.getOperand(0);
    if (!OpFI.isFI())
      return 0;
@ -715,10 +692,11 @@ unsigned HexagonInstrInfo::reduceLoopCount(MachineBasicBlock &MBB,
  unsigned NewLoopCount = createVR(MF, MVT::i32);
  MachineInstr *NewAdd = BuildMI(&MBB, DL, get(Hexagon::A2_addi), NewLoopCount).
    addReg(LoopCount).addImm(-1);
  const auto &HRI = *MF->getSubtarget<HexagonSubtarget>().getRegisterInfo();
  // Update the previously generated instructions with the new loop counter.
  for (SmallVectorImpl<MachineInstr *>::iterator I = PrevInsts.begin(),
       E = PrevInsts.end(); I != E; ++I)
    (*I)->substituteRegister(LoopCount, NewLoopCount, 0, getRegisterInfo());
    (*I)->substituteRegister(LoopCount, NewLoopCount, 0, HRI);
  PrevInsts.clear();
  PrevInsts.push_back(NewCmp);
  PrevInsts.push_back(NewAdd);
@ -757,7 +735,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  auto &HRI = getRegisterInfo();
  MachineFunction &MF = *MBB.getParent();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  unsigned KillFlag = getKillRegState(KillSrc);

  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
@ -812,12 +791,12 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
      .addReg(SrcReg, KillFlag);
    return;
  }
  if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) {
  if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
      addReg(SrcReg, KillFlag);
    return;
  }
  if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) {
  if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
    unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
    unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
    BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
@ -825,33 +804,22 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
      .addReg(LoSrc, KillFlag);
    return;
  }
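With HvxVR, HvxWR, and HvxQR parameterized over the vector length, the single V6_vcombine path above now serves both the 64- and 128-byte modes. For reference, a sketch of the complete idiom; the hunk header above hides one operand line, so the HiSrc operand shown first is an assumption, not verbatim source:

  // Sketch only: rebuild the HvxWR pair DestReg from the two halves of SrcReg.
  unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
  unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
  BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
      .addReg(HiSrc, KillFlag)   // assumed: hi half first
      .addReg(LoSrc, KillFlag);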
  if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) {
  if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
      .addReg(SrcReg)
      .addReg(SrcReg, KillFlag);
    return;
  }
  if (Hexagon::VecPredRegsRegClass.contains(SrcReg) &&
      Hexagon::VectorRegsRegClass.contains(DestReg)) {
  if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
      Hexagon::HvxVRRegClass.contains(DestReg)) {
    llvm_unreachable("Unimplemented pred to vec");
    return;
  }
  if (Hexagon::VecPredRegsRegClass.contains(DestReg) &&
      Hexagon::VectorRegsRegClass.contains(SrcReg)) {
  if (Hexagon::HvxQRRegClass.contains(DestReg) &&
      Hexagon::HvxVRRegClass.contains(SrcReg)) {
    llvm_unreachable("Unimplemented vec to pred");
    return;
  }
  if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) {
    unsigned HiDst = HRI.getSubReg(DestReg, Hexagon::vsub_hi);
    unsigned LoDst = HRI.getSubReg(DestReg, Hexagon::vsub_lo);
    unsigned HiSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
    unsigned LoSrc = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), HiDst)
      .addReg(HiSrc, KillFlag);
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), LoDst)
      .addReg(LoSrc, KillFlag);
    return;
  }

#ifndef NDEBUG
  // Show the invalid registers to ease debugging.
@ -868,7 +836,8 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  unsigned SlotAlign = MFI.getObjectAlignment(FI);
  unsigned RegAlign = TRI->getSpillAlignment(*RC);
  unsigned KillFlag = getKillRegState(isKill);
  bool HasAlloca = MFI.hasVarSizedObjects();
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
@ -876,7 +845,7 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Align);
      MFI.getObjectSize(FI), SlotAlign);

  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
@ -894,50 +863,34 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
    BuildMI(MBB, I, DL, get(Hexagon::STriw_mod))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai_128B))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vstorerq_ai))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 128 ? Hexagon::V6_vS32Ub_ai_128B
                               : Hexagon::V6_vS32b_ai_128B;
    BuildMI(MBB, I, DL, get(Opc))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 64 ? Hexagon::V6_vS32Ub_ai
      SlotAlign = HFI.getStackAlignment();
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vS32Ub_ai
                                        : Hexagon::V6_vS32b_ai;
    MachineMemOperand *MMOA = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), SlotAlign);
    BuildMI(MBB, I, DL, get(Opc))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
      .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 64 ? Hexagon::PS_vstorerwu_ai
      SlotAlign = HFI.getStackAlignment();
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vstorerwu_ai
                                        : Hexagon::PS_vstorerw_ai;
    MachineMemOperand *MMOA = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), SlotAlign);
    BuildMI(MBB, I, DL, get(Opc))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 128 ? Hexagon::PS_vstorerwu_ai_128B
                               : Hexagon::PS_vstorerw_ai_128B;
    BuildMI(MBB, I, DL, get(Opc))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
      .addReg(SrcReg, KillFlag).addMemOperand(MMOA);
  } else {
    llvm_unreachable("Unimplemented");
  }
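The store path above, and the load path that follows, now compare the stack slot's alignment against the register class's spill alignment instead of a hard-coded 64 or 128. A minimal standalone sketch of that selection rule (the enum and function names are illustrative, not the LLVM API):

  #include <cassert>

  enum Opcode { V6_vS32b_ai, V6_vS32Ub_ai };

  // Pick the unaligned store whenever the slot is under-aligned for the class.
  Opcode selectStoreOpcode(unsigned SlotAlign, unsigned RegAlign) {
    return SlotAlign < RegAlign ? V6_vS32Ub_ai : V6_vS32b_ai;
  }

  int main() {
    // 64-byte mode: a 64-byte-aligned slot can use the aligned store.
    assert(selectStoreOpcode(64, 64) == V6_vS32b_ai);
    // 128-byte mode: the same slot is under-aligned, so fall back.
    assert(selectStoreOpcode(64, 128) == V6_vS32Ub_ai);
    // With allocas, SlotAlign is first clamped to the stack alignment.
    return 0;
  }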
@ -950,14 +903,15 @@ void HexagonInstrInfo::loadRegFromStackSlot(
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  unsigned SlotAlign = MFI.getObjectAlignment(FI);
  unsigned RegAlign = TRI->getSpillAlignment(*RC);
  bool HasAlloca = MFI.hasVarSizedObjects();
  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
  const HexagonFrameLowering &HFI = *HST.getFrameLowering();

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Align);
      MFI.getObjectSize(FI), SlotAlign);

  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
@ -971,44 +925,31 @@ void HexagonInstrInfo::loadRegFromStackSlot(
  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai_128B), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
  } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::PS_vloadrq_ai), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
  } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 128 ? Hexagon::PS_vloadrwu_ai_128B
                               : Hexagon::PS_vloadrw_ai_128B;
    BuildMI(MBB, I, DL, get(Opc), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 128 ? Hexagon::V6_vL32Ub_ai_128B
                               : Hexagon::V6_vL32b_ai_128B;
    BuildMI(MBB, I, DL, get(Opc), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 64 ? Hexagon::V6_vL32Ub_ai
      SlotAlign = HFI.getStackAlignment();
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::V6_vL32Ub_ai
                                        : Hexagon::V6_vL32b_ai;
    MachineMemOperand *MMOA = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), SlotAlign);
    BuildMI(MBB, I, DL, get(Opc), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
      .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
  } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
    // If there are variable-sized objects, spills will not be aligned.
    if (HasAlloca)
      Align = HFI.getStackAlignment();
    unsigned Opc = Align < 64 ? Hexagon::PS_vloadrwu_ai
      SlotAlign = HFI.getStackAlignment();
    unsigned Opc = SlotAlign < RegAlign ? Hexagon::PS_vloadrwu_ai
                                        : Hexagon::PS_vloadrw_ai;
    MachineMemOperand *MMOA = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), SlotAlign);
    BuildMI(MBB, I, DL, get(Opc), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
      .addFrameIndex(FI).addImm(0).addMemOperand(MMOA);
  } else {
    llvm_unreachable("Can't store this register to stack slot");
  }
@ -1029,12 +970,12 @@ static void getLiveRegsAt(LivePhysRegs &Regs, const MachineInstr &MI) {
/// new instructions and erase MI. The function should return true if
/// anything was changed.
bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const HexagonRegisterInfo &HRI = getRegisterInfo();
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  const unsigned VecOffset = 1;

  switch (Opc) {
    case TargetOpcode::COPY: {
@ -1054,7 +995,6 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
        .addImm(-MI.getOperand(1).getImm());
      MBB.erase(MI);
      return true;
    case Hexagon::V6_vassignp_128B:
    case Hexagon::V6_vassignp: {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
@ -1065,7 +1005,6 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      MBB.erase(MI);
      return true;
    }
    case Hexagon::V6_lo_128B:
    case Hexagon::V6_lo: {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
@ -1075,7 +1014,6 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      MRI.clearKillFlags(SrcSubLo);
      return true;
    }
    case Hexagon::V6_hi_128B:
    case Hexagon::V6_hi: {
      unsigned SrcReg = MI.getOperand(1).getReg();
      unsigned DstReg = MI.getOperand(0).getReg();
@ -1086,25 +1024,14 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      return true;
    }
    case Hexagon::PS_vstorerw_ai:
    case Hexagon::PS_vstorerwu_ai:
    case Hexagon::PS_vstorerw_ai_128B:
    case Hexagon::PS_vstorerwu_ai_128B: {
      bool Is128B = (Opc == Hexagon::PS_vstorerw_ai_128B ||
                     Opc == Hexagon::PS_vstorerwu_ai_128B);
      bool Aligned = (Opc == Hexagon::PS_vstorerw_ai ||
                      Opc == Hexagon::PS_vstorerw_ai_128B);
    case Hexagon::PS_vstorerwu_ai: {
      bool Aligned = Opc == Hexagon::PS_vstorerw_ai;
      unsigned SrcReg = MI.getOperand(2).getReg();
      unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
      unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
      unsigned NewOpc;
      if (Aligned)
        NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
                        : Hexagon::V6_vS32b_ai;
      else
        NewOpc = Is128B ? Hexagon::V6_vS32Ub_ai_128B
                        : Hexagon::V6_vS32Ub_ai;
      unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
      unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);

      unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
      MachineInstr *MI1New =
          BuildMI(MBB, MI, DL, get(NewOpc))
              .add(MI.getOperand(0))
@ -1122,23 +1049,12 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      return true;
    }
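The expansion above splits a vector-pair spill into two single-vector stores, the second landing one full vector past the first; the PS_vloadrw expansion that follows mirrors it. That distance is now queried as HRI.getSpillSize(HvxVRRegClass) instead of being computed as VecOffset shifted by 6 or 7. A standalone sketch of why the two formulations agree:

  #include <cassert>

  // The second half of the pair is placed one HVX vector past the first.
  unsigned secondHalfOffset(unsigned HvxSpillSize) { return HvxSpillSize; }

  int main() {
    assert(secondHalfOffset(64) == (1u << 6));   // old 64-byte-mode math
    assert(secondHalfOffset(128) == (1u << 7));  // old 128-byte-mode math
    return 0;
  }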
    case Hexagon::PS_vloadrw_ai:
    case Hexagon::PS_vloadrwu_ai:
    case Hexagon::PS_vloadrw_ai_128B:
    case Hexagon::PS_vloadrwu_ai_128B: {
      bool Is128B = (Opc == Hexagon::PS_vloadrw_ai_128B ||
                     Opc == Hexagon::PS_vloadrwu_ai_128B);
      bool Aligned = (Opc == Hexagon::PS_vloadrw_ai ||
                      Opc == Hexagon::PS_vloadrw_ai_128B);
      unsigned NewOpc;
      if (Aligned)
        NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
                        : Hexagon::V6_vL32b_ai;
      else
        NewOpc = Is128B ? Hexagon::V6_vL32Ub_ai_128B
                        : Hexagon::V6_vL32Ub_ai;

    case Hexagon::PS_vloadrwu_ai: {
      bool Aligned = Opc == Hexagon::PS_vloadrw_ai;
      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
      unsigned NewOpc = Aligned ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai;
      unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);

      MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
                                     HRI.getSubReg(DstReg, Hexagon::vsub_lo))
                                 .add(MI.getOperand(1))
@ -1248,8 +1164,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      MBB.erase(MI);
      return true;
    }
    case Hexagon::PS_vselect:
    case Hexagon::PS_vselect_128B: {
    case Hexagon::PS_vselect: {
      const MachineOperand &Op0 = MI.getOperand(0);
      const MachineOperand &Op1 = MI.getOperand(1);
      const MachineOperand &Op2 = MI.getOperand(2);
@ -1283,8 +1198,7 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      MBB.erase(MI);
      return true;
    }
    case Hexagon::PS_wselect:
    case Hexagon::PS_wselect_128B: {
    case Hexagon::PS_wselect: {
      MachineOperand &Op0 = MI.getOperand(0);
      MachineOperand &Op1 = MI.getOperand(1);
      MachineOperand &Op2 = MI.getOperand(2);
@ -1452,9 +1366,11 @@ bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
  return false;
}

bool HexagonInstrInfo::DefinesPredicate(
    MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
  auto &HRI = getRegisterInfo();
bool HexagonInstrInfo::DefinesPredicate(MachineInstr &MI,
    std::vector<MachineOperand> &Pred) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
    MachineOperand MO = MI.getOperand(oper);
    if (MO.isReg()) {
@ -1895,8 +1811,8 @@ bool HexagonInstrInfo::isDependent(const MachineInstr &ProdMI,
                                   const MachineInstr &ConsMI) const {
  if (!ProdMI.getDesc().getNumDefs())
    return false;

  auto &HRI = getRegisterInfo();
  const MachineFunction &MF = *ProdMI.getParent()->getParent();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  SmallVector<unsigned, 4> DefsA;
  SmallVector<unsigned, 4> DefsB;
@ -1931,8 +1847,6 @@ bool HexagonInstrInfo::isDotCurInst(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
    case Hexagon::V6_vL32b_cur_pi:
    case Hexagon::V6_vL32b_cur_ai:
    case Hexagon::V6_vL32b_cur_pi_128B:
    case Hexagon::V6_vL32b_cur_ai_128B:
      return true;
  }
  return false;
@ -2444,44 +2358,38 @@ bool HexagonInstrInfo::isHVXVec(const MachineInstr &MI) const {

// Check if the Offset is a valid auto-inc imm by Load/Store Type.
//
bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, const int Offset) const {
  if (VT == MVT::v16i32 || VT == MVT::v8i64 ||
      VT == MVT::v32i16 || VT == MVT::v64i8) {
    return (Offset >= Hexagon_MEMV_AUTOINC_MIN &&
            Offset <= Hexagon_MEMV_AUTOINC_MAX &&
            (Offset & 0x3f) == 0);
bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, int Offset) const {
  int Size = VT.getSizeInBits() / 8;
  if (Offset % Size != 0)
    return false;
  int Count = Offset / Size;

  switch (VT.getSimpleVT().SimpleTy) {
    // For scalars the auto-inc is s4
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
    case MVT::i64:
      return isInt<4>(Count);
    // For HVX vectors the auto-inc is s3
    case MVT::v64i8:
    case MVT::v32i16:
    case MVT::v16i32:
    case MVT::v8i64:
    case MVT::v128i8:
    case MVT::v64i16:
    case MVT::v32i32:
    case MVT::v16i64:
      return isInt<3>(Count);
    default:
      break;
  }
  // 128B
  if (VT == MVT::v32i32 || VT == MVT::v16i64 ||
      VT == MVT::v64i16 || VT == MVT::v128i8) {
    return (Offset >= Hexagon_MEMV_AUTOINC_MIN_128B &&
            Offset <= Hexagon_MEMV_AUTOINC_MAX_128B &&
            (Offset & 0x7f) == 0);
  }
  if (VT == MVT::i64) {
    return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
            Offset <= Hexagon_MEMD_AUTOINC_MAX &&
            (Offset & 0x7) == 0);
  }
  if (VT == MVT::i32) {
    return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
            Offset <= Hexagon_MEMW_AUTOINC_MAX &&
            (Offset & 0x3) == 0);
  }
  if (VT == MVT::i16) {
    return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
            Offset <= Hexagon_MEMH_AUTOINC_MAX &&
            (Offset & 0x1) == 0);
  }
  if (VT == MVT::i8) {
    return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
            Offset <= Hexagon_MEMB_AUTOINC_MAX);
  }
  llvm_unreachable("Not an auto-inc opc!");

  llvm_unreachable("Not an valid type!");
}
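The rewrite above replaces the per-type range constants with one count-based rule: the offset must be a whole number of elements, and the element count must fit a signed 4-bit field for scalars or a signed 3-bit field for HVX vectors. A standalone worked sketch (isIntN is a local stand-in for llvm::isInt<N>; the values reproduce the constants at the top of the file):

  #include <cassert>

  bool isIntN(int N, int x) { return x >= -(1 << (N - 1)) && x < (1 << (N - 1)); }

  bool validAutoInc(int Offset, int SizeBytes, bool IsHvx) {
    if (Offset % SizeBytes != 0)
      return false;
    int Count = Offset / SizeBytes;
    return isIntN(IsHvx ? 3 : 4, Count);   // s3 for HVX, s4 for scalars
  }

  int main() {
    assert(validAutoInc(12, 4, false));    // i32: count 3 fits s4
    assert(!validAutoInc(34, 4, false));   // i32: not a multiple of 4
    assert(validAutoInc(192, 64, true));   // count 3 = Hexagon_MEMV_AUTOINC_MAX
    assert(!validAutoInc(256, 64, true));  // count 4 overflows s3
    assert(validAutoInc(-512, 128, true)); // count -4 = ..._MIN_128B
    return 0;
  }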

bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
      bool Extend) const {
      const TargetRegisterInfo *TRI, bool Extend) const {
  // This function is to check whether the "Offset" is in the correct range of
  // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
  // inserted to calculate the final address. Due to this reason, the function
@ -2490,7 +2398,6 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
  // there are cases where a misaligned pointer recast can cause this
  // problem, and we need to allow for it. The front end warns of such
  // misaligns with respect to load size.

  switch (Opcode) {
  case Hexagon::PS_vstorerq_ai:
  case Hexagon::PS_vstorerw_ai:
@ -2503,22 +2410,13 @@ bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
  case Hexagon::V6_vL32b_nt_ai:
  case Hexagon::V6_vS32b_nt_ai:
  case Hexagon::V6_vL32Ub_ai:
  case Hexagon::V6_vS32Ub_ai:
    return isShiftedInt<4,6>(Offset);

  case Hexagon::PS_vstorerq_ai_128B:
  case Hexagon::PS_vstorerw_ai_128B:
  case Hexagon::PS_vstorerw_nt_ai_128B:
  case Hexagon::PS_vloadrq_ai_128B:
  case Hexagon::PS_vloadrw_ai_128B:
  case Hexagon::PS_vloadrw_nt_ai_128B:
  case Hexagon::V6_vL32b_ai_128B:
  case Hexagon::V6_vS32b_ai_128B:
  case Hexagon::V6_vL32b_nt_ai_128B:
  case Hexagon::V6_vS32b_nt_ai_128B:
  case Hexagon::V6_vL32Ub_ai_128B:
  case Hexagon::V6_vS32Ub_ai_128B:
    return isShiftedInt<4,7>(Offset);
  case Hexagon::V6_vS32Ub_ai: {
    unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
    assert(isPowerOf2_32(VectorSize));
    if (Offset & (VectorSize-1))
      return false;
    return isInt<4>(Offset >> Log2_32(VectorSize));
  }
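The single case above generalizes the old isShiftedInt<4,6> / isShiftedInt<4,7> pair by scaling the offset with the spill size of HvxVR. A standalone sketch of the equivalence, under the assumption that the vector length VL is a power of two:

  #include <cassert>

  bool validVecOffset(int Offset, unsigned VL) {
    if (Offset & int(VL - 1))          // must be a multiple of the vector size
      return false;
    int Count = Offset / int(VL);      // same as the arithmetic right shift
    return Count >= -8 && Count <= 7;  // signed 4-bit field
  }

  int main() {
    assert(validVecOffset(448, 64));     // 7 * 64: top of the s4 range
    assert(!validVecOffset(512, 64));    // 8 * 64: out of range
    assert(!validVecOffset(96, 64));     // misaligned
    assert(validVecOffset(-1024, 128));  // -8 * 128: bottom of the range
    return 0;
  }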

  case Hexagon::J2_loop0i:
  case Hexagon::J2_loop1i:
@ -3230,15 +3128,6 @@ int HexagonInstrInfo::getDotCurOp(const MachineInstr &MI) const {
    return Hexagon::V6_vL32b_nt_cur_pi;
  case Hexagon::V6_vL32b_nt_ai:
    return Hexagon::V6_vL32b_nt_cur_ai;
  //128B
  case Hexagon::V6_vL32b_pi_128B:
    return Hexagon::V6_vL32b_cur_pi_128B;
  case Hexagon::V6_vL32b_ai_128B:
    return Hexagon::V6_vL32b_cur_ai_128B;
  case Hexagon::V6_vL32b_nt_pi_128B:
    return Hexagon::V6_vL32b_nt_cur_pi_128B;
  case Hexagon::V6_vL32b_nt_ai_128B:
    return Hexagon::V6_vL32b_nt_cur_ai_128B;
  }
  return 0;
}
@ -3255,15 +3144,6 @@ int HexagonInstrInfo::getNonDotCurOp(const MachineInstr &MI) const {
    return Hexagon::V6_vL32b_nt_pi;
  case Hexagon::V6_vL32b_nt_cur_ai:
    return Hexagon::V6_vL32b_nt_ai;
  //128B
  case Hexagon::V6_vL32b_cur_pi_128B:
    return Hexagon::V6_vL32b_pi_128B;
  case Hexagon::V6_vL32b_cur_ai_128B:
    return Hexagon::V6_vL32b_ai_128B;
  case Hexagon::V6_vL32b_nt_cur_pi_128B:
    return Hexagon::V6_vL32b_nt_pi_128B;
  case Hexagon::V6_vL32b_nt_cur_ai_128B:
    return Hexagon::V6_vL32b_nt_ai_128B;
  }
  return 0;
}
@ -3383,13 +3263,6 @@ int HexagonInstrInfo::getDotNewOp(const MachineInstr &MI) const {

  case Hexagon::V6_vS32b_pi:
    return Hexagon::V6_vS32b_new_pi;

  // 128B
  case Hexagon::V6_vS32b_ai_128B:
    return Hexagon::V6_vS32b_new_ai_128B;

  case Hexagon::V6_vS32b_pi_128B:
    return Hexagon::V6_vS32b_new_pi_128B;
  }
  return 0;
}
@ -3556,7 +3429,8 @@ int HexagonInstrInfo::getDotOldOp(const MachineInstr &MI) const {
HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
      const MachineInstr &MI) const {
  unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
  auto &HRI = getRegisterInfo();
  const MachineFunction &MF = *MI.getParent()->getParent();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  switch (MI.getOpcode()) {
  default:
@ -3924,14 +3798,16 @@ int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                        unsigned DefIdx,
                                        const MachineInstr &UseMI,
                                        unsigned UseIdx) const {
  auto &RI = getRegisterInfo();
  const MachineFunction &MF = *DefMI.getParent()->getParent();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  // Get DefIdx and UseIdx for super registers.
  MachineOperand DefMO = DefMI.getOperand(DefIdx);

  if (RI.isPhysicalRegister(DefMO.getReg())) {
  if (HRI.isPhysicalRegister(DefMO.getReg())) {
    if (DefMO.isImplicit()) {
      for (MCSuperRegIterator SR(DefMO.getReg(), &RI); SR.isValid(); ++SR) {
        int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &RI);
      for (MCSuperRegIterator SR(DefMO.getReg(), &HRI); SR.isValid(); ++SR) {
        int Idx = DefMI.findRegisterDefOperandIdx(*SR, false, false, &HRI);
        if (Idx != -1) {
          DefIdx = Idx;
          break;
@ -3941,8 +3817,8 @@ int HexagonInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,

    MachineOperand UseMO = UseMI.getOperand(UseIdx);
    if (UseMO.isImplicit()) {
      for (MCSuperRegIterator SR(UseMO.getReg(), &RI); SR.isValid(); ++SR) {
        int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &RI);
      for (MCSuperRegIterator SR(UseMO.getReg(), &HRI); SR.isValid(); ++SR) {
        int Idx = UseMI.findRegisterUseOperandIdx(*SR, false, &HRI);
        if (Idx != -1) {
          UseIdx = Idx;
          break;
@ -3999,12 +3875,13 @@ unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr &MI) const {
  if (Size != 0)
    return Size;

  const MachineFunction &MF = *MI.getParent()->getParent();
  const auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  // Handle vector access sizes.
  switch (S) {
  case HexagonII::Vector64Access:
    return 64;
  case HexagonII::Vector128Access:
    return 128;
  case HexagonII::HVXVectorAccess:
    return HRI.getSpillSize(Hexagon::HvxVRRegClass);
  default:
    llvm_unreachable("Unexpected instruction");
  }
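HVXVectorAccess collapses the two fixed-size enumerators into one that is resolved against the subtarget's vector length at query time. A minimal standalone sketch of that idea (HvxMode and hvxVectorBytes are illustrative stand-ins, not the LLVM API):

  #include <cassert>
  #include <cstdio>

  enum class HvxMode { V64B, V128B };

  // Stand-in for querying the spill size of HvxVR from the register info.
  unsigned hvxVectorBytes(HvxMode M) {
    return M == HvxMode::V64B ? 64u : 128u;
  }

  int main() {
    assert(hvxVectorBytes(HvxMode::V64B) == 64);
    assert(hvxVectorBytes(HvxMode::V128B) == 128);
    std::printf("HVX vector access: %u or %u bytes\n",
                hvxVectorBytes(HvxMode::V64B), hvxVectorBytes(HvxMode::V128B));
    return 0;
  }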

@ -14,7 +14,6 @@
#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONINSTRINFO_H

#include "HexagonRegisterInfo.h"
#include "MCTargetDesc/HexagonBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
@ -32,10 +31,9 @@ namespace llvm {

struct EVT;
class HexagonSubtarget;
class HexagonRegisterInfo;

class HexagonInstrInfo : public HexagonGenInstrInfo {
  const HexagonRegisterInfo RI;

  virtual void anchor();

public:
@ -327,8 +325,6 @@ public:
  /// HexagonInstrInfo specifics.
  ///

  const HexagonRegisterInfo &getRegisterInfo() const { return RI; }

  unsigned createVR(MachineFunction* MF, MVT VT) const;

  bool isAbsoluteSet(const MachineInstr &MI) const;
@ -387,7 +383,8 @@ public:
                      const MachineInstr &MI2) const;
  bool isHVXVec(const MachineInstr &MI) const;
  bool isValidAutoIncImm(const EVT VT, const int Offset) const;
  bool isValidOffset(unsigned Opcode, int Offset, bool Extend = true) const;
  bool isValidOffset(unsigned Opcode, int Offset,
                     const TargetRegisterInfo *TRI, bool Extend = true) const;
  bool isVecAcc(const MachineInstr &MI) const;
  bool isVecALU(const MachineInstr &MI) const;
  bool isVecUsableNextPacket(const MachineInstr &ProdMI,

@ -1348,17 +1348,11 @@ def: T_stc_pat<S2_storerd_pci, int_hexagon_circ_std, s4_3ImmPred, I64>;
def: T_stc_pat<S2_storerf_pci, int_hexagon_circ_sthhi, s4_1ImmPred, I32>;

multiclass MaskedStore <InstHexagon MI, Intrinsic IntID> {
  def : Pat<(IntID VecPredRegs:$src1, IntRegs:$src2, VectorRegs:$src3),
            (MI VecPredRegs:$src1, IntRegs:$src2, #0, VectorRegs:$src3)>,
        Requires<[UseHVXSgl]>;

  def : Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1,
            IntRegs:$src2,
            VectorRegs128B:$src3),
            (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1,
            IntRegs:$src2, #0,
            VectorRegs128B:$src3)>,
        Requires<[UseHVXDbl]>;
  def : Pat<(IntID HvxQR:$src1, IntRegs:$src2, HvxVR:$src3),
            (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>;
  def : Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, IntRegs:$src2,
            HvxVR:$src3),
            (MI HvxQR:$src1, IntRegs:$src2, #0, HvxVR:$src3)>;
}

defm : MaskedStore <V6_vS32b_qpred_ai, int_hexagon_V6_vmaskedstoreq>;

@ -13,445 +13,298 @@

let AddedComplexity = 100 in {
  def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 VecDblRegs:$src1))),
              (v16i32 (EXTRACT_SUBREG (v32i32 VecDblRegs:$src1), vsub_lo)) >,
        Requires<[UseHVXSgl]>;
  def : Pat < (v16i32 (int_hexagon_V6_lo (v32i32 HvxWR:$src1))),
              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_lo)) >;

  def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 VecDblRegs:$src1))),
              (v16i32 (EXTRACT_SUBREG (v32i32 VecDblRegs:$src1), vsub_hi)) >,
        Requires<[UseHVXSgl]>;
  def : Pat < (v16i32 (int_hexagon_V6_hi (v32i32 HvxWR:$src1))),
              (v16i32 (EXTRACT_SUBREG (v32i32 HvxWR:$src1), vsub_hi)) >;

  def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 VecDblRegs128B:$src1))),
              (v32i32 (EXTRACT_SUBREG (v64i32 VecDblRegs128B:$src1), vsub_lo)) >,
        Requires<[UseHVXDbl]>;
  def : Pat < (v32i32 (int_hexagon_V6_lo_128B (v64i32 HvxWR:$src1))),
              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_lo)) >;

  def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 VecDblRegs128B:$src1))),
              (v32i32 (EXTRACT_SUBREG (v64i32 VecDblRegs128B:$src1), vsub_hi)) >,
        Requires<[UseHVXDbl]>;
  def : Pat < (v32i32 (int_hexagon_V6_hi_128B (v64i32 HvxWR:$src1))),
              (v32i32 (EXTRACT_SUBREG (v64i32 HvxWR:$src1), vsub_hi)) >;
}

def : Pat <(v512i1 (bitconvert (v16i32 VectorRegs:$src1))),
           (v512i1 (V6_vandvrt(v16i32 VectorRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v512i1 (bitconvert (v16i32 HvxVR:$src1))),
           (v512i1 (V6_vandvrt(v16i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v512i1 (bitconvert (v32i16 VectorRegs:$src1))),
           (v512i1 (V6_vandvrt(v32i16 VectorRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v512i1 (bitconvert (v32i16 HvxVR:$src1))),
           (v512i1 (V6_vandvrt(v32i16 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v512i1 (bitconvert (v64i8 VectorRegs:$src1))),
           (v512i1 (V6_vandvrt(v64i8 VectorRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v512i1 (bitconvert (v64i8 HvxVR:$src1))),
           (v512i1 (V6_vandvrt(v64i8 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v512i1 (bitconvert (v8i64 VectorRegs:$src1))),
           (v512i1 (V6_vandvrt(v8i64 VectorRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v512i1 (bitconvert (v8i64 HvxVR:$src1))),
           (v512i1 (V6_vandvrt(v8i64 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v16i32 (bitconvert (v512i1 VecPredRegs:$src1))),
           (v16i32 (V6_vandqrt(v512i1 VecPredRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v16i32 (bitconvert (v512i1 HvxQR:$src1))),
           (v16i32 (V6_vandqrt(v512i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v32i16 (bitconvert (v512i1 VecPredRegs:$src1))),
           (v32i16 (V6_vandqrt(v512i1 VecPredRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v32i16 (bitconvert (v512i1 HvxQR:$src1))),
           (v32i16 (V6_vandqrt(v512i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v64i8 (bitconvert (v512i1 VecPredRegs:$src1))),
           (v64i8 (V6_vandqrt(v512i1 VecPredRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v64i8 (bitconvert (v512i1 HvxQR:$src1))),
           (v64i8 (V6_vandqrt(v512i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v8i64 (bitconvert (v512i1 VecPredRegs:$src1))),
           (v8i64 (V6_vandqrt(v512i1 VecPredRegs:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXSgl]>;
def : Pat <(v8i64 (bitconvert (v512i1 HvxQR:$src1))),
           (v8i64 (V6_vandqrt(v512i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v1024i1 (bitconvert (v32i32 VectorRegs128B:$src1))),
           (v1024i1 (V6_vandvrt_128B(v32i32 VectorRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v1024i1 (bitconvert (v32i32 HvxVR:$src1))),
           (v1024i1 (V6_vandvrt (v32i32 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v1024i1 (bitconvert (v64i16 VectorRegs128B:$src1))),
           (v1024i1 (V6_vandvrt_128B(v64i16 VectorRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v1024i1 (bitconvert (v64i16 HvxVR:$src1))),
           (v1024i1 (V6_vandvrt (v64i16 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v1024i1 (bitconvert (v128i8 VectorRegs128B:$src1))),
           (v1024i1 (V6_vandvrt_128B(v128i8 VectorRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v1024i1 (bitconvert (v128i8 HvxVR:$src1))),
           (v1024i1 (V6_vandvrt (v128i8 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v1024i1 (bitconvert (v16i64 VectorRegs128B:$src1))),
           (v1024i1 (V6_vandvrt_128B(v16i64 VectorRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v1024i1 (bitconvert (v16i64 HvxVR:$src1))),
           (v1024i1 (V6_vandvrt (v16i64 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v32i32 (bitconvert (v1024i1 VecPredRegs128B:$src1))),
           (v32i32 (V6_vandqrt_128B(v1024i1 VecPredRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v32i32 (bitconvert (v1024i1 HvxQR:$src1))),
           (v32i32 (V6_vandqrt (v1024i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v64i16 (bitconvert (v1024i1 VecPredRegs128B:$src1))),
           (v64i16 (V6_vandqrt_128B(v1024i1 VecPredRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v64i16 (bitconvert (v1024i1 HvxQR:$src1))),
           (v64i16 (V6_vandqrt (v1024i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v128i8 (bitconvert (v1024i1 VecPredRegs128B:$src1))),
           (v128i8 (V6_vandqrt_128B(v1024i1 VecPredRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v128i8 (bitconvert (v1024i1 HvxQR:$src1))),
           (v128i8 (V6_vandqrt (v1024i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

def : Pat <(v16i64 (bitconvert (v1024i1 VecPredRegs128B:$src1))),
           (v16i64 (V6_vandqrt_128B(v1024i1 VecPredRegs128B:$src1),
           (A2_tfrsi 0x01010101)))>,
     Requires<[UseHVXDbl]>;
def : Pat <(v16i64 (bitconvert (v1024i1 HvxQR:$src1))),
           (v16i64 (V6_vandqrt (v1024i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;

let AddedComplexity = 140 in {
  def : Pat <(store (v512i1 VecPredRegs:$src1), (i32 IntRegs:$addr)),
  def : Pat <(store (v512i1 HvxQR:$src1), (i32 IntRegs:$addr)),
             (V6_vS32b_ai IntRegs:$addr, 0,
             (v16i32 (V6_vandqrt (v512i1 VecPredRegs:$src1),
             (A2_tfrsi 0x01010101))))>,
       Requires<[UseHVXSgl]>;
             (v16i32 (V6_vandqrt (v512i1 HvxQR:$src1),
             (A2_tfrsi 0x01010101))))>;

  def : Pat <(v512i1 (load (i32 IntRegs:$addr))),
             (v512i1 (V6_vandvrt
             (v16i32 (V6_vL32b_ai IntRegs:$addr, 0)), (A2_tfrsi 0x01010101)))>,
       Requires<[UseHVXSgl]>;
             (v16i32 (V6_vL32b_ai IntRegs:$addr, 0)), (A2_tfrsi 0x01010101)))>;

  def : Pat <(store (v1024i1 VecPredRegs128B:$src1), (i32 IntRegs:$addr)),
             (V6_vS32b_ai_128B IntRegs:$addr, 0,
             (v32i32 (V6_vandqrt_128B (v1024i1 VecPredRegs128B:$src1),
             (A2_tfrsi 0x01010101))))>,
       Requires<[UseHVXDbl]>;
  def : Pat <(store (v1024i1 HvxQR:$src1), (i32 IntRegs:$addr)),
             (V6_vS32b_ai IntRegs:$addr, 0,
             (v32i32 (V6_vandqrt (v1024i1 HvxQR:$src1),
             (A2_tfrsi 0x01010101))))>;

  def : Pat <(v1024i1 (load (i32 IntRegs:$addr))),
             (v1024i1 (V6_vandvrt_128B
             (v32i32 (V6_vL32b_ai_128B IntRegs:$addr, 0)),
             (A2_tfrsi 0x01010101)))>,
       Requires<[UseHVXDbl]>;
             (v1024i1 (V6_vandvrt
             (v32i32 (V6_vL32b_ai IntRegs:$addr, 0)), (A2_tfrsi 0x01010101)))>;
}

multiclass T_R_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID IntRegs:$src1), (MI IntRegs:$src1)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID IntRegs:$src1), (MI IntRegs:$src1)>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") IntRegs:$src1),
           (!cast<InstHexagon>(MI#"_128B") IntRegs:$src1)>,
       Requires<[UseHVXDbl]>;
           (MI IntRegs:$src1)>;
}

multiclass T_V_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1),
           (MI VectorRegs:$src1)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1),
           (MI HvxVR:$src1)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1),
           (MI HvxVR:$src1)>;
}

multiclass T_W_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1),
           (MI VecDblRegs:$src1)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1),
           (MI HvxWR:$src1)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1),
           (MI HvxWR:$src1)>;
}

multiclass T_Q_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecPredRegs:$src1),
           (MI VecPredRegs:$src1)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxQR:$src1),
           (MI HvxQR:$src1)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1),
           (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1),
           (MI HvxQR:$src1)>;
}

multiclass T_WR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, IntRegs:$src2),
           (MI VecDblRegs:$src1, IntRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, IntRegs:$src2),
           (MI HvxWR:$src1, IntRegs:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B")VecDblRegs128B:$src1, IntRegs:$src2),
           (!cast<InstHexagon>(MI#"_128B")VecDblRegs128B:$src1, IntRegs:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B")HvxWR:$src1, IntRegs:$src2),
           (MI HvxWR:$src1, IntRegs:$src2)>;
}

multiclass T_VR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, IntRegs:$src2),
           (MI VectorRegs:$src1, IntRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
           (MI HvxVR:$src1, IntRegs:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B")VectorRegs128B:$src1, IntRegs:$src2),
           (!cast<InstHexagon>(MI#"_128B")VectorRegs128B:$src1, IntRegs:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B")HvxVR:$src1, IntRegs:$src2),
           (MI HvxVR:$src1, IntRegs:$src2)>;
}

multiclass T_WV_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2),
           (MI VecDblRegs:$src1, VectorRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxVR:$src2),
           (MI HvxWR:$src1, HvxVR:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2),
           (MI HvxWR:$src1, HvxVR:$src2)>;
}

multiclass T_WW_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2),
           (MI VecDblRegs:$src1, VecDblRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxWR:$src2),
           (MI HvxWR:$src1, HvxWR:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxWR:$src2),
           (MI HvxWR:$src1, HvxWR:$src2)>;
}

multiclass T_VV_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2),
           (MI VectorRegs:$src1, VectorRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
           (MI HvxVR:$src1, HvxVR:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2),
           (MI HvxVR:$src1, HvxVR:$src2)>;
}

multiclass T_QR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecPredRegs:$src1, IntRegs:$src2),
           (MI VecPredRegs:$src1, IntRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxQR:$src1, IntRegs:$src2),
           (MI HvxQR:$src1, IntRegs:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1,
           IntRegs:$src2),
           (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1,
           IntRegs:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, IntRegs:$src2),
           (MI HvxQR:$src1, IntRegs:$src2)>;
}

multiclass T_QQ_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecPredRegs:$src1, VecPredRegs:$src2),
           (MI VecPredRegs:$src1, VecPredRegs:$src2)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxQR:$src1, HvxQR:$src2),
           (MI HvxQR:$src1, HvxQR:$src2)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1,
           VecPredRegs128B:$src2),
           (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1,
           VecPredRegs128B:$src2)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, HvxQR:$src2),
           (MI HvxQR:$src1, HvxQR:$src2)>;
}

multiclass T_WWR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3),
           (MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
           (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxWR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
}

multiclass T_VVR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
           (MI VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegs:$src3),
           (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
}

multiclass T_WVR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
           (MI VecDblRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, IntRegs:$src3),
           (MI HvxWR:$src1, HvxVR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxWR:$src1, HvxVR:$src2, IntRegs:$src3)>;
}

multiclass T_VWR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VecDblRegs:$src2, IntRegs:$src3),
           (MI VectorRegs:$src1, VecDblRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxWR:$src2, IntRegs:$src3),
           (MI HvxVR:$src1, HvxWR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VecDblRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxWR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VecDblRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxVR:$src1, HvxWR:$src2, IntRegs:$src3)>;
}

multiclass T_VVV_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
           (MI VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, HvxVR:$src3),
           (MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
           HvxVR:$src3),
           (MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3)>;
}

multiclass T_WVV_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
           (MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
           (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2,
           HvxVR:$src3),
           (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
}

multiclass T_QVV_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecPredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
           (MI VecPredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxQR:$src1, HvxVR:$src2, HvxVR:$src3),
           (MI HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, HvxVR:$src2,
           HvxVR:$src3),
           (MI HvxQR:$src1, HvxVR:$src2, HvxVR:$src3)>;
}

multiclass T_VQR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3),
           (MI VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxQR:$src2, IntRegs:$src3),
           (MI HvxVR:$src1, HvxQR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VecPredRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxQR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VecPredRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxVR:$src1, HvxQR:$src2, IntRegs:$src3)>;
}

multiclass T_QVR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecPredRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
           (MI VecPredRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxQR:$src1, HvxVR:$src2, IntRegs:$src3),
           (MI HvxQR:$src1, HvxVR:$src2, IntRegs:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1,
           VectorRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, HvxVR:$src2,
           IntRegs:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1,
           VectorRegs128B:$src2,
           IntRegs:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxQR:$src1, HvxVR:$src2, IntRegs:$src3)>;
}

multiclass T_VVI_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, imm:$src3),
           (MI VectorRegs:$src1, VectorRegs:$src2, imm:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, imm:$src3),
           (MI HvxVR:$src1, HvxVR:$src2, imm:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2, imm:$src3),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2, imm:$src3)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1,
           HvxVR:$src2, imm:$src3),
           (MI HvxVR:$src1, HvxVR:$src2, imm:$src3)>;
}

multiclass T_WRI_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, IntRegs:$src2, imm:$src3),
           (MI VecDblRegs:$src1, IntRegs:$src2, imm:$src3)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, IntRegs:$src2, imm:$src3),
           (MI HvxWR:$src1, IntRegs:$src2, imm:$src3)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1,
           IntRegs:$src2, imm:$src3),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           IntRegs:$src2, imm:$src3)>,
       Requires<[UseHVXDbl]>;
           (MI HvxWR:$src1, IntRegs:$src2, imm:$src3)>;
}

multiclass T_WWRI_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3, imm:$src4),
           (MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3, imm:$src4)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3, imm:$src4),
           (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3, imm:$src4)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2,
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxWR:$src2,
           IntRegs:$src3, imm:$src4),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VecDblRegs128B:$src2,
           IntRegs:$src3, imm:$src4)>,
       Requires<[UseHVXDbl]>;
           (MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3, imm:$src4)>;
}

multiclass T_VVVR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3,
           IntRegs:$src4),
           (MI VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3,
           IntRegs:$src4)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4),
           (MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3,
           IntRegs:$src4),
           (!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3,
           IntRegs:$src4)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
           HvxVR:$src3, IntRegs:$src4),
           (MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4)>;
}

multiclass T_WVVR_pat <InstHexagon MI, Intrinsic IntID> {
  def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3,
           IntRegs:$src4),
           (MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3,
           IntRegs:$src4)>,
       Requires<[UseHVXSgl]>;
  def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4),
           (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4)>;

  def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3,
           IntRegs:$src4),
           (!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1,
           VectorRegs128B:$src2,
           VectorRegs128B:$src3,
           IntRegs:$src4)>,
       Requires<[UseHVXDbl]>;
  def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2,
           HvxVR:$src3, IntRegs:$src4),
           (MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, IntRegs:$src4)>;
}

defm : T_WR_pat <V6_vtmpyb, int_hexagon_V6_vtmpyb>;
@ -793,11 +646,10 @@ defm : T_VR_pat <V6_vinsertwr, int_hexagon_V6_vinsertwr>;
//def : T_PPQ_pat <S2_cabacencbin, int_hexagon_S2_cabacencbin>;

def: Pat<(v64i16 (trunc v64i32:$Vdd)),
         (v64i16 (V6_vpackwh_sat_128B
                 (v32i32 (V6_hi_128B VecDblRegs128B:$Vdd)),
                 (v32i32 (V6_lo_128B VecDblRegs128B:$Vdd))))>,
     Requires<[UseHVXDbl]>;
         (v64i16 (V6_vpackwh_sat
                 (v32i32 (V6_hi HvxWR:$Vdd)),
                 (v32i32 (V6_lo HvxWR:$Vdd))))>;

def: Pat<(int_hexagon_V6_vd0), (V6_vd0)>;
def: Pat<(int_hexagon_V6_vd0_128B), (V6_vd0_128B)>;
def: Pat<(int_hexagon_V6_vd0_128B), (V6_vd0)>;
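Both the base and the _128B intrinsic names now select the same instruction, as the vd0 pair above makes explicit. A standalone sketch of the naming collapse these patterns encode (selectOpcode and its string handling are illustrative only, not how TableGen works internally):

  #include <iostream>
  #include <string>

  std::string selectOpcode(std::string IntrinsicName) {
    const std::string Suffix = "_128B";
    // The _128B variants no longer map to distinct instructions; dropping
    // the suffix yields the single parameterized opcode.
    if (IntrinsicName.size() > Suffix.size() &&
        IntrinsicName.compare(IntrinsicName.size() - Suffix.size(),
                              Suffix.size(), Suffix) == 0)
      IntrinsicName.erase(IntrinsicName.size() - Suffix.size());
    return IntrinsicName.substr(IntrinsicName.find("V6_"));
  }

  int main() {
    std::cout << selectOpcode("int_hexagon_V6_vd0") << "\n";      // V6_vd0
    std::cout << selectOpcode("int_hexagon_V6_vd0_128B") << "\n"; // V6_vd0
    return 0;
  }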
|
||||
|
||||
|
@ -8,147 +8,123 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
multiclass T_VR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
|
||||
def: Pat<(IntID VectorRegs:$src1, IntRegs:$src2),
|
||||
(MI VectorRegs:$src1, IntRegs:$src2)>,
|
||||
Requires<[UseHVXSgl]>;
|
||||
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, IntRegs:$src2),
|
||||
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, IntRegs:$src2)>,
|
||||
Requires<[UseHVXDbl]>;
|
||||
def: Pat<(IntID HvxVR:$src1, IntRegs:$src2),
|
||||
(MI HvxVR:$src1, IntRegs:$src2)>;
|
||||
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, IntRegs:$src2),
|
||||
(MI HvxVR:$src1, IntRegs:$src2)>;
|
||||
}

multiclass T_VVL_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegsLow8:$src3),
(MI VectorRegs:$src1, VectorRegs:$src2, IntRegsLow8:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegsLow8:$src3),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegsLow8:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3),
(MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
IntRegsLow8:$src3),
(MI HvxVR:$src1, HvxVR:$src2, IntRegsLow8:$src3)>;
}

multiclass T_VV_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2),
(MI VectorRegs:$src1, VectorRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxVR:$src2),
(MI HvxVR:$src1, HvxVR:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2),
(MI HvxVR:$src1, HvxVR:$src2)>;
}

multiclass T_WW_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2),
(MI VecDblRegs:$src1, VecDblRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2),
(!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxWR:$src1, HvxWR:$src2),
(MI HvxWR:$src1, HvxWR:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxWR:$src2),
(MI HvxWR:$src1, HvxWR:$src2)>;
}

multiclass T_WVV_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
(MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
(!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3),
(MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2,
HvxVR:$src3),
(MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3)>;
}

multiclass T_WR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecDblRegs:$src1, IntRegs:$src2),
(MI VecDblRegs:$src1, IntRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1, IntRegs:$src2),
(!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1, IntRegs:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxWR:$src1, IntRegs:$src2),
(MI HvxWR:$src1, IntRegs:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, IntRegs:$src2),
(MI HvxWR:$src1, IntRegs:$src2)>;
}

multiclass T_WWR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3),
(MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2, IntRegs:$src3),
(!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2, IntRegs:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxWR:$src1, HvxWR:$src2, IntRegs:$src3),
(MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxWR:$src2,
IntRegs:$src3),
(MI HvxWR:$src1, HvxWR:$src2, IntRegs:$src3)>;
}

multiclass T_VVR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
(MI VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegs:$src3),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegs:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, IntRegs:$src3),
(MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
IntRegs:$src3),
(MI HvxVR:$src1, HvxVR:$src2, IntRegs:$src3)>;
}

multiclass T_ZR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecPredRegs:$src1, IntRegs:$src2),
(MI VecPredRegs:$src1, IntRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1, IntRegs:$src2),
(!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1, IntRegs:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxQR:$src1, IntRegs:$src2),
(MI HvxQR:$src1, IntRegs:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, IntRegs:$src2),
(MI HvxQR:$src1, IntRegs:$src2)>;
}

multiclass T_VZR_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3),
(MI VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VecPredRegs128B:$src2, IntRegs:$src3),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VecPredRegs128B:$src2, IntRegs:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxQR:$src2, IntRegs:$src3),
(MI HvxVR:$src1, HvxQR:$src2, IntRegs:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxQR:$src2,
IntRegs:$src3),
(MI HvxVR:$src1, HvxQR:$src2, IntRegs:$src3)>;
}

multiclass T_ZV_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecPredRegs:$src1, VectorRegs:$src2),
(MI VecPredRegs:$src1, VectorRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1, VectorRegs128B:$src2),
(!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1, VectorRegs128B:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxQR:$src1, HvxVR:$src2),
(MI HvxQR:$src1, HvxVR:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, HvxVR:$src2),
(MI HvxQR:$src1, HvxVR:$src2)>;
}

multiclass T_R_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID IntRegs:$src1),
(MI IntRegs:$src1)>,
Requires<[UseHVXSgl]>;
(MI IntRegs:$src1)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") IntRegs:$src1),
(!cast<InstHexagon>(MI#"_128B") IntRegs:$src1)>,
Requires<[UseHVXDbl]>;
(MI IntRegs:$src1)>;
}

multiclass T_ZZ_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecPredRegs:$src1, VecPredRegs:$src2),
(MI VecPredRegs:$src1, VecPredRegs:$src2)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecPredRegs128B:$src1, VecPredRegs128B:$src2),
(!cast<InstHexagon>(MI#"_128B") VecPredRegs128B:$src1, VecPredRegs128B:$src2)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxQR:$src1, HvxQR:$src2),
(MI HvxQR:$src1, HvxQR:$src2)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxQR:$src1, HvxQR:$src2),
(MI HvxQR:$src1, HvxQR:$src2)>;
}

multiclass T_VVI_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, imm:$src3),
(MI VectorRegs:$src1, VectorRegs:$src2, imm:$src3)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, imm:$src3),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, imm:$src3)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, imm:$src3),
(MI HvxVR:$src1, HvxVR:$src2, imm:$src3)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
imm:$src3),
(MI HvxVR:$src1, HvxVR:$src2, imm:$src3)>;
}

multiclass T_VVVI_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4),
(MI VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4),
(!cast<InstHexagon>(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4),
(MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxVR:$src1, HvxVR:$src2,
HvxVR:$src3, imm:$src4),
(MI HvxVR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4)>;
}

multiclass T_WVVI_HVX_gen_pat <InstHexagon MI, Intrinsic IntID> {
def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4),
(MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4)>,
Requires<[UseHVXSgl]>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4),
(!cast<InstHexagon>(MI#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4)>,
Requires<[UseHVXDbl]>;
def: Pat<(IntID HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4),
(MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4)>;
def: Pat<(!cast<Intrinsic>(IntID#"_128B") HvxWR:$src1, HvxVR:$src2,
HvxVR:$src3, imm:$src4),
(MI HvxWR:$src1, HvxVR:$src2, HvxVR:$src3, imm:$src4)>;
}

def : T_R_pat <S6_vsplatrbp, int_hexagon_S6_vsplatrbp>;

@ -18,14 +18,14 @@ def HiReg: OutPatFrag<(ops node:$Rs), (EXTRACT_SUBREG (i64 $Rs), isub_hi)>;
def IsOrAdd: PatFrag<(ops node:$Addr, node:$off),
(or node:$Addr, node:$off), [{ return isOrEquivalentToAdd(N); }]>;

def Iss4_6 : PatLeaf<(i32 imm), [{
def IsVecOff : PatLeaf<(i32 imm), [{
int32_t V = N->getSExtValue();
return isShiftedInt<4,6>(V);
}]>;

def Iss4_7 : PatLeaf<(i32 imm), [{
int32_t V = N->getSExtValue();
return isShiftedInt<4,7>(V);
int32_t VecSize = HRI->getSpillSize(Hexagon::HvxVRRegClass);
assert(isPowerOf2_32(VecSize));
if ((uint32_t(V) & (uint32_t(VecSize)-1)) != 0)
return false;
int32_t L = Log2_32(VecSize);
return isInt<4>(V >> L);
}]>;
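A worked example of what the rewritten predicate accepts (my annotation, assuming getSpillSize reports the HVX vector size in bytes):

// Illustration (not in the patch): with VecSize = 64, IsVecOff admits
// multiples of 64 in [-512, 448], i.e. isInt<4>(V >> 6), reproducing the
// old Iss4_6; with VecSize = 128 it admits multiples of 128 in
// [-1024, 896], reproducing Iss4_7, so one PatLeaf covers both modes.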

def IsPow2_32 : PatLeaf<(i32 imm), [{

@ -2776,190 +2776,94 @@ def unalignedstore : PatFrag<(ops node:$val, node:$addr), (store $val, $addr), [

multiclass vS32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned stores
def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32b_nt_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32b_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(unalignedstore (VTSgl VectorRegs:$src1), IntRegs:$addr),
(V6_vS32Ub_ai IntRegs:$addr, 0, (VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;

// 128B Aligned stores
def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32b_nt_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32b_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(unalignedstore (VTDbl VectorRegs128B:$src1), IntRegs:$addr),
(V6_vS32Ub_ai_128B IntRegs:$addr, 0, (VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignednontemporalstore (VTSgl HvxVR:$src1), IntRegs:$addr),
(V6_vS32b_nt_ai IntRegs:$addr, 0, (VTSgl HvxVR:$src1))>;
def : Pat<(alignedstore (VTSgl HvxVR:$src1), IntRegs:$addr),
(V6_vS32b_ai IntRegs:$addr, 0, (VTSgl HvxVR:$src1))>;
def : Pat<(unalignedstore (VTSgl HvxVR:$src1), IntRegs:$addr),
(V6_vS32Ub_ai IntRegs:$addr, 0, (VTSgl HvxVR:$src1))>;

// Fold Add R+OFF into vector store.
let AddedComplexity = 10 in {
def : Pat<(alignednontemporalstore (VTSgl VectorRegs:$src1),
(add IntRegs:$src2, Iss4_6:$offset)),
(V6_vS32b_nt_ai IntRegs:$src2, Iss4_6:$offset,
(VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VectorRegs:$src1),
(add IntRegs:$src2, Iss4_6:$offset)),
(V6_vS32b_ai IntRegs:$src2, Iss4_6:$offset,
(VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(unalignedstore (VTSgl VectorRegs:$src1),
(add IntRegs:$src2, Iss4_6:$offset)),
(V6_vS32Ub_ai IntRegs:$src2, Iss4_6:$offset,
(VTSgl VectorRegs:$src1))>,
Requires<[UseHVXSgl]>;

// Fold Add R+OFF into vector store 128B.
def : Pat<(alignednontemporalstore (VTDbl VectorRegs128B:$src1),
(add IntRegs:$src2, Iss4_7:$offset)),
(V6_vS32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset,
(VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VectorRegs128B:$src1),
(add IntRegs:$src2, Iss4_7:$offset)),
(V6_vS32b_ai_128B IntRegs:$src2, Iss4_7:$offset,
(VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(unalignedstore (VTDbl VectorRegs128B:$src1),
(add IntRegs:$src2, Iss4_7:$offset)),
(V6_vS32Ub_ai_128B IntRegs:$src2, Iss4_7:$offset,
(VTDbl VectorRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignednontemporalstore (VTSgl HvxVR:$src1),
(add IntRegs:$src2, IsVecOff:$offset)),
(V6_vS32b_nt_ai IntRegs:$src2, imm:$offset,
(VTSgl HvxVR:$src1))>;
def : Pat<(alignedstore (VTSgl HvxVR:$src1),
(add IntRegs:$src2, IsVecOff:$offset)),
(V6_vS32b_ai IntRegs:$src2, imm:$offset,
(VTSgl HvxVR:$src1))>;
def : Pat<(unalignedstore (VTSgl HvxVR:$src1),
(add IntRegs:$src2, IsVecOff:$offset)),
(V6_vS32Ub_ai IntRegs:$src2, imm:$offset,
(VTSgl HvxVR:$src1))>;
}
}

defm : vS32b_ai_pats <v64i8, v128i8>;
defm : vS32b_ai_pats <v32i16, v64i16>;
defm : vS32b_ai_pats <v16i32, v32i32>;
defm : vS32b_ai_pats <v8i64, v16i64>;
defm : vS32b_ai_pats <VecI8, v128i8>;
defm : vS32b_ai_pats <VecI16, v64i16>;
defm : vS32b_ai_pats <VecI32, v32i32>;
defm : vS32b_ai_pats <VecI64, v16i64>;


multiclass vL32b_ai_pats <ValueType VTSgl, ValueType VTDbl> {
// Aligned loads
def : Pat < (VTSgl (alignednontemporalload IntRegs:$addr)),
(V6_vL32b_nt_ai IntRegs:$addr, 0) >,
Requires<[UseHVXSgl]>;
(V6_vL32b_nt_ai IntRegs:$addr, 0) >;
def : Pat < (VTSgl (alignedload IntRegs:$addr)),
(V6_vL32b_ai IntRegs:$addr, 0) >,
Requires<[UseHVXSgl]>;
(V6_vL32b_ai IntRegs:$addr, 0) >;
def : Pat < (VTSgl (unalignedload IntRegs:$addr)),
(V6_vL32Ub_ai IntRegs:$addr, 0) >,
Requires<[UseHVXSgl]>;

// 128B Load
def : Pat < (VTDbl (alignednontemporalload IntRegs:$addr)),
(V6_vL32b_nt_ai_128B IntRegs:$addr, 0) >,
Requires<[UseHVXDbl]>;
def : Pat < (VTDbl (alignedload IntRegs:$addr)),
(V6_vL32b_ai_128B IntRegs:$addr, 0) >,
Requires<[UseHVXDbl]>;
def : Pat < (VTDbl (unalignedload IntRegs:$addr)),
(V6_vL32Ub_ai_128B IntRegs:$addr, 0) >,
Requires<[UseHVXDbl]>;
(V6_vL32Ub_ai IntRegs:$addr, 0) >;

// Fold Add R+OFF into vector load.
let AddedComplexity = 10 in {
def : Pat<(VTDbl (alignednontemporalload (add IntRegs:$src2, Iss4_7:$offset))),
(V6_vL32b_nt_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload (add IntRegs:$src2, Iss4_7:$offset))),
(V6_vL32b_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (unalignedload (add IntRegs:$src2, Iss4_7:$offset))),
(V6_vL32Ub_ai_128B IntRegs:$src2, Iss4_7:$offset)>,
Requires<[UseHVXDbl]>;

def : Pat<(VTSgl (alignednontemporalload (add IntRegs:$src2, Iss4_6:$offset))),
(V6_vL32b_nt_ai IntRegs:$src2, Iss4_6:$offset)>,
Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignedload (add IntRegs:$src2, Iss4_6:$offset))),
(V6_vL32b_ai IntRegs:$src2, Iss4_6:$offset)>,
Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (unalignedload (add IntRegs:$src2, Iss4_6:$offset))),
(V6_vL32Ub_ai IntRegs:$src2, Iss4_6:$offset)>,
Requires<[UseHVXSgl]>;
def : Pat<(VTSgl (alignednontemporalload (add IntRegs:$src2, IsVecOff:$offset))),
(V6_vL32b_nt_ai IntRegs:$src2, imm:$offset)>;
def : Pat<(VTSgl (alignedload (add IntRegs:$src2, IsVecOff:$offset))),
(V6_vL32b_ai IntRegs:$src2, imm:$offset)>;
def : Pat<(VTSgl (unalignedload (add IntRegs:$src2, IsVecOff:$offset))),
(V6_vL32Ub_ai IntRegs:$src2, imm:$offset)>;
}
}

defm : vL32b_ai_pats <v64i8, v128i8>;
defm : vL32b_ai_pats <v32i16, v64i16>;
defm : vL32b_ai_pats <v16i32, v32i32>;
defm : vL32b_ai_pats <v8i64, v16i64>;
defm : vL32b_ai_pats <VecI8, v128i8>;
defm : vL32b_ai_pats <VecI16, v64i16>;
defm : vL32b_ai_pats <VecI32, v32i32>;
defm : vL32b_ai_pats <VecI64, v16i64>;

multiclass STrivv_pats <ValueType VTSgl, ValueType VTDbl> {
def : Pat<(alignednontemporalstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
(PS_vstorerw_nt_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(alignedstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
(PS_vstorerw_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;
def : Pat<(unalignedstore (VTSgl VecDblRegs:$src1), IntRegs:$addr),
(PS_vstorerwu_ai IntRegs:$addr, 0, (VTSgl VecDblRegs:$src1))>,
Requires<[UseHVXSgl]>;

def : Pat<(alignednontemporalstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
(PS_vstorerw_nt_ai_128B IntRegs:$addr, 0,
(VTDbl VecDblRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignedstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
(PS_vstorerw_ai_128B IntRegs:$addr, 0,
(VTDbl VecDblRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(unalignedstore (VTDbl VecDblRegs128B:$src1), IntRegs:$addr),
(PS_vstorerwu_ai_128B IntRegs:$addr, 0,
(VTDbl VecDblRegs128B:$src1))>,
Requires<[UseHVXDbl]>;
def : Pat<(alignednontemporalstore (VTSgl HvxWR:$src1), IntRegs:$addr),
(PS_vstorerw_nt_ai IntRegs:$addr, 0, (VTSgl HvxWR:$src1))>;
def : Pat<(alignedstore (VTSgl HvxWR:$src1), IntRegs:$addr),
(PS_vstorerw_ai IntRegs:$addr, 0, (VTSgl HvxWR:$src1))>;
def : Pat<(unalignedstore (VTSgl HvxWR:$src1), IntRegs:$addr),
(PS_vstorerwu_ai IntRegs:$addr, 0, (VTSgl HvxWR:$src1))>;
}

defm : STrivv_pats <v128i8, v256i8>;
defm : STrivv_pats <v64i16, v128i16>;
defm : STrivv_pats <v32i32, v64i32>;
defm : STrivv_pats <v16i64, v32i64>;
defm : STrivv_pats <VecPI8, v256i8>;
defm : STrivv_pats <VecPI16, v128i16>;
defm : STrivv_pats <VecPI32, v64i32>;
defm : STrivv_pats <VecPI64, v32i64>;

multiclass LDrivv_pats <ValueType VTSgl, ValueType VTDbl> {
def : Pat<(VTSgl (alignednontemporalload I32:$addr)),
(PS_vloadrw_nt_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
(PS_vloadrw_nt_ai I32:$addr, 0)>;
def : Pat<(VTSgl (alignedload I32:$addr)),
(PS_vloadrw_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;
(PS_vloadrw_ai I32:$addr, 0)>;
def : Pat<(VTSgl (unalignedload I32:$addr)),
(PS_vloadrwu_ai I32:$addr, 0)>,
Requires<[UseHVXSgl]>;

def : Pat<(VTDbl (alignednontemporalload I32:$addr)),
(PS_vloadrw_nt_ai_128B I32:$addr, 0)>,
Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (alignedload I32:$addr)),
(PS_vloadrw_ai_128B I32:$addr, 0)>,
Requires<[UseHVXDbl]>;
def : Pat<(VTDbl (unalignedload I32:$addr)),
(PS_vloadrwu_ai_128B I32:$addr, 0)>,
Requires<[UseHVXDbl]>;
(PS_vloadrwu_ai I32:$addr, 0)>;
}

defm : LDrivv_pats <v128i8, v256i8>;
defm : LDrivv_pats <v64i16, v128i16>;
defm : LDrivv_pats <v32i32, v64i32>;
defm : LDrivv_pats <v16i64, v32i64>;
defm : LDrivv_pats <VecPI8, v256i8>;
defm : LDrivv_pats <VecPI16, v128i16>;
defm : LDrivv_pats <VecPI32, v64i32>;
defm : LDrivv_pats <VecPI64, v32i64>;

let Predicates = [HasV60T,UseHVXSgl] in {
def: Pat<(select I1:$Pu, (v16i32 VectorRegs:$Vs), VectorRegs:$Vt),
(PS_vselect I1:$Pu, VectorRegs:$Vs, VectorRegs:$Vt)>;
def: Pat<(select I1:$Pu, (v32i32 VecDblRegs:$Vs), VecDblRegs:$Vt),
(PS_wselect I1:$Pu, VecDblRegs:$Vs, VecDblRegs:$Vt)>;
}
let Predicates = [HasV60T,UseHVXDbl] in {
def: Pat<(select I1:$Pu, (v32i32 VectorRegs128B:$Vs), VectorRegs128B:$Vt),
(PS_vselect_128B I1:$Pu, VectorRegs128B:$Vs, VectorRegs128B:$Vt)>;
def: Pat<(select I1:$Pu, (v64i32 VecDblRegs128B:$Vs), VecDblRegs128B:$Vt),
(PS_wselect_128B I1:$Pu, VecDblRegs128B:$Vs, VecDblRegs128B:$Vt)>;
let Predicates = [HasV60T] in {
def: Pat<(select I1:$Pu, (VecI32 HvxVR:$Vs), HvxVR:$Vt),
(PS_vselect I1:$Pu, HvxVR:$Vs, HvxVR:$Vt)>;
def: Pat<(select I1:$Pu, (VecPI32 HvxWR:$Vs), HvxWR:$Vt),
(PS_wselect I1:$Pu, HvxWR:$Vs, HvxWR:$Vt)>;
}

@ -2968,49 +2872,22 @@ def SDTHexagonVCOMBINE: SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>,

def HexagonVCOMBINE: SDNode<"HexagonISD::VCOMBINE", SDTHexagonVCOMBINE>;

def: Pat<(v32i32 (HexagonVCOMBINE (v16i32 VectorRegs:$Vs),
(v16i32 VectorRegs:$Vt))),
(V6_vcombine VectorRegs:$Vs, VectorRegs:$Vt)>,
Requires<[UseHVXSgl]>;
def: Pat<(v64i32 (HexagonVCOMBINE (v32i32 VecDblRegs:$Vs),
(v32i32 VecDblRegs:$Vt))),
(V6_vcombine_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>,
Requires<[UseHVXDbl]>;
def: Pat<(VecPI32 (HexagonVCOMBINE (VecI32 HvxVR:$Vs), (VecI32 HvxVR:$Vt))),
(V6_vcombine HvxVR:$Vs, HvxVR:$Vt)>;

def SDTHexagonVPACK: SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>, SDTCisVec<1>]>;

def HexagonVPACKE: SDNode<"HexagonISD::VPACKE", SDTHexagonVPACK>;
def HexagonVPACKO: SDNode<"HexagonISD::VPACKO", SDTHexagonVPACK>;

let Predicates = [UseHVXSgl] in {
def: Pat<(v64i8 (HexagonVPACKE (v64i8 VectorRegs:$Vs),
(v64i8 VectorRegs:$Vt))),
(V6_vpackeb VectorRegs:$Vs, VectorRegs:$Vt)>;
def: Pat<(v64i8 (HexagonVPACKO (v64i8 VectorRegs:$Vs),
(v64i8 VectorRegs:$Vt))),
(V6_vpackob VectorRegs:$Vs, VectorRegs:$Vt)>;
def: Pat<(v32i16 (HexagonVPACKE (v32i16 VectorRegs:$Vs),
(v32i16 VectorRegs:$Vt))),
(V6_vpackeh VectorRegs:$Vs, VectorRegs:$Vt)>;
def: Pat<(v32i16 (HexagonVPACKO (v32i16 VectorRegs:$Vs),
(v32i16 VectorRegs:$Vt))),
(V6_vpackoh VectorRegs:$Vs, VectorRegs:$Vt)>;
}

let Predicates = [UseHVXDbl] in {
def: Pat<(v128i8 (HexagonVPACKE (v128i8 VecDblRegs:$Vs),
(v128i8 VecDblRegs:$Vt))),
(V6_vpackeb_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>;
def: Pat<(v128i8 (HexagonVPACKO (v128i8 VecDblRegs:$Vs),
(v128i8 VecDblRegs:$Vt))),
(V6_vpackob_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>;
def: Pat<(v64i16 (HexagonVPACKE (v64i16 VecDblRegs:$Vs),
(v64i16 VecDblRegs:$Vt))),
(V6_vpackeh_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>;
def: Pat<(v64i16 (HexagonVPACKO (v64i16 VecDblRegs:$Vs),
(v64i16 VecDblRegs:$Vt))),
(V6_vpackoh_128B VecDblRegs:$Vs, VecDblRegs:$Vt)>;
}
def: Pat<(VecI8 (HexagonVPACKE (VecI8 HvxVR:$Vs), (VecI8 HvxVR:$Vt))),
(V6_vpackeb HvxVR:$Vs, HvxVR:$Vt)>;
def: Pat<(VecI8 (HexagonVPACKO (VecI8 HvxVR:$Vs), (VecI8 HvxVR:$Vt))),
(V6_vpackob HvxVR:$Vs, HvxVR:$Vt)>;
def: Pat<(VecI16 (HexagonVPACKE (VecI16 HvxVR:$Vs), (VecI16 HvxVR:$Vt))),
(V6_vpackeh HvxVR:$Vs, HvxVR:$Vt)>;
def: Pat<(VecI16 (HexagonVPACKO (VecI16 HvxVR:$Vs), (VecI16 HvxVR:$Vt))),
(V6_vpackoh HvxVR:$Vs, HvxVR:$Vt)>;

def V2I1: PatLeaf<(v2i1 PredRegs:$R)>;
def V4I1: PatLeaf<(v4i1 PredRegs:$R)>;

@ -295,7 +295,7 @@ let isTerminator = 1, hasSideEffects = 0, isReturn = 1, isCodeGenOnly = 1,
def PS_jmpretfnewpt : T_JMPr_c<1, 1, 1, J2_jumprfnewpt>, PredNewRel;
}

//defm V6_vtran2x2_map : HexagonMapping<(outs VectorRegs:$Vy32, VectorRegs:$Vx32), (ins VectorRegs:$Vx32in, IntRegs:$Rt32), "vtrans2x2(${Vy32},${Vx32},${Rt32})", (V6_vshuff VectorRegs:$Vy32, VectorRegs:$Vx32, VectorRegs:$Vx32in, IntRegs:$Rt32)>;
//defm V6_vtran2x2_map : HexagonMapping<(outs HvxVR:$Vy32, HvxVR:$Vx32), (ins HvxVR:$Vx32in, IntRegs:$Rt32), "vtrans2x2(${Vy32},${Vx32},${Rt32})", (V6_vshuff HvxVR:$Vy32, HvxVR:$Vx32, HvxVR:$Vx32in, IntRegs:$Rt32)>;

// The reason for the custom inserter is to record all ALLOCA instructions
// in MachineFunctionInfo.

@ -397,84 +397,53 @@ let isCall = 1, Uses = [R29, R31], isAsmParserOnly = 1 in {

// Vector store pseudos
let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
mayStore = 1, hasSideEffects = 0 in
mayStore = 1, accessSize = HVXVectorAccess, hasSideEffects = 0 in
class STrivv_template<RegisterClass RC, InstHexagon rootInst>
: InstHexagon<(outs), (ins IntRegs:$addr, s32_0Imm:$off, RC:$src),
"", [], "", rootInst.Itinerary, rootInst.Type>;

let accessSize = Vector64Access, Predicates = [HasV60T,UseHVXSgl] in {
def PS_vstorerw_ai: STrivv_template<VecDblRegs, V6_vS32b_ai>;
def PS_vstorerw_nt_ai: STrivv_template<VecDblRegs, V6_vS32b_nt_ai>;
def PS_vstorerwu_ai: STrivv_template<VecDblRegs, V6_vS32Ub_ai>;
}
def PS_vstorerw_ai: STrivv_template<HvxWR, V6_vS32b_ai>,
Requires<[HasV60T,UseHVX]>;
def PS_vstorerw_nt_ai: STrivv_template<HvxWR, V6_vS32b_nt_ai>,
Requires<[HasV60T,UseHVX]>;
def PS_vstorerwu_ai: STrivv_template<HvxWR, V6_vS32Ub_ai>,
Requires<[HasV60T,UseHVX]>;

let accessSize = Vector128Access, Predicates = [HasV60T,UseHVXDbl] in {
def PS_vstorerw_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32b_ai_128B>;
def PS_vstorerw_nt_ai_128B: STrivv_template<VecDblRegs128B,
V6_vS32b_nt_ai_128B>;
def PS_vstorerwu_ai_128B: STrivv_template<VecDblRegs128B, V6_vS32Ub_ai_128B>;
}

let isPseudo = 1, isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0 in {
let accessSize = Vector64Access in
def PS_vstorerq_ai: Pseudo<(outs),
(ins IntRegs:$Rs, s32_0Imm:$Off, VecPredRegs:$Qt), "", []>,
Requires<[HasV60T,UseHVXSgl]>;
let accessSize = Vector128Access in
def PS_vstorerq_ai_128B: Pseudo<(outs),
(ins IntRegs:$Rs, s32_0Imm:$Off, VecPredRegs128B:$Qt), "", []>,
Requires<[HasV60T,UseHVXDbl]>;
}
let isPseudo = 1, isCodeGenOnly = 1, mayStore = 1, hasSideEffects = 0 in
def PS_vstorerq_ai: Pseudo<(outs),
(ins IntRegs:$Rs, s32_0Imm:$Off, HvxQR:$Qt), "", []>,
Requires<[HasV60T,UseHVX]>;

// Vector load pseudos
let Predicates = [HasV60T, UseHVX], isPseudo = 1, isCodeGenOnly = 1,
mayLoad = 1, hasSideEffects = 0 in
mayLoad = 1, accessSize = HVXVectorAccess, hasSideEffects = 0 in
class LDrivv_template<RegisterClass RC, InstHexagon rootInst>
: InstHexagon<(outs RC:$dst), (ins IntRegs:$addr, s32_0Imm:$off),
"", [], "", rootInst.Itinerary, rootInst.Type>;

let accessSize = Vector64Access, Predicates = [HasV60T,UseHVXSgl] in {
def PS_vloadrw_ai: LDrivv_template<VecDblRegs, V6_vL32b_ai>;
def PS_vloadrw_nt_ai: LDrivv_template<VecDblRegs, V6_vL32b_nt_ai>;
def PS_vloadrwu_ai: LDrivv_template<VecDblRegs, V6_vL32Ub_ai>;
}

let accessSize = Vector128Access, Predicates = [HasV60T,UseHVXDbl] in {
def PS_vloadrw_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32b_ai_128B>;
def PS_vloadrw_nt_ai_128B: LDrivv_template<VecDblRegs128B,
V6_vL32b_nt_ai_128B>;
def PS_vloadrwu_ai_128B: LDrivv_template<VecDblRegs128B, V6_vL32Ub_ai_128B>;
}

let isPseudo = 1, isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0 in {
let accessSize = Vector64Access in
def PS_vloadrq_ai: Pseudo<(outs VecPredRegs:$Qd),
(ins IntRegs:$Rs, s32_0Imm:$Off), "", []>,
def PS_vloadrw_ai: LDrivv_template<HvxWR, V6_vL32b_ai>,
Requires<[HasV60T,UseHVX]>;
def PS_vloadrw_nt_ai: LDrivv_template<HvxWR, V6_vL32b_nt_ai>,
Requires<[HasV60T,UseHVXSgl]>;
let accessSize = Vector128Access in
def PS_vloadrq_ai_128B: Pseudo<(outs VecPredRegs128B:$Qd),
def PS_vloadrwu_ai: LDrivv_template<HvxWR, V6_vL32Ub_ai>,
Requires<[HasV60T,UseHVX]>;

let isPseudo = 1, isCodeGenOnly = 1, mayLoad = 1, hasSideEffects = 0 in
def PS_vloadrq_ai: Pseudo<(outs HvxQR:$Qd),
(ins IntRegs:$Rs, s32_0Imm:$Off), "", []>,
Requires<[HasV60T,UseHVXDbl]>;
}
Requires<[HasV60T,UseHVX]>;


let isCodeGenOnly = 1, isPseudo = 1, hasSideEffects = 0 in
class VSELInst<dag outs, dag ins, InstHexagon rootInst>
: InstHexagon<outs, ins, "", [], "", rootInst.Itinerary, rootInst.Type>;

def PS_vselect: VSELInst<(outs VectorRegs:$dst),
(ins PredRegs:$src1, VectorRegs:$src2, VectorRegs:$src3),
V6_vcmov>, Requires<[HasV60T,UseHVXSgl]>;
def PS_vselect_128B: VSELInst<(outs VectorRegs128B:$dst),
(ins PredRegs:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3),
V6_vcmov>, Requires<[HasV60T,UseHVXDbl]>;

def PS_wselect: VSELInst<(outs VecDblRegs:$dst),
(ins PredRegs:$src1, VecDblRegs:$src2, VecDblRegs:$src3),
V6_vccombine>, Requires<[HasV60T,UseHVXSgl]>;
def PS_wselect_128B: VSELInst<(outs VecDblRegs128B:$dst),
(ins PredRegs:$src1, VecDblRegs128B:$src2, VecDblRegs128B:$src3),
V6_vccombine>, Requires<[HasV60T,UseHVXDbl]>;
def PS_vselect: VSELInst<(outs HvxVR:$dst),
(ins PredRegs:$src1, HvxVR:$src2, HvxVR:$src3), V6_vcmov>,
Requires<[HasV60T,UseHVX]>;
def PS_wselect: VSELInst<(outs HvxWR:$dst),
(ins PredRegs:$src1, HvxWR:$src2, HvxWR:$src3), V6_vccombine>,
Requires<[HasV60T,UseHVX]>;

// Store predicate.
let isExtendable = 1, opExtendable = 1, isExtentSigned = 1, opExtentBits = 13,

@ -41,8 +41,9 @@

using namespace llvm;

HexagonRegisterInfo::HexagonRegisterInfo()
: HexagonGenRegisterInfo(Hexagon::R31) {}
HexagonRegisterInfo::HexagonRegisterInfo(unsigned HwMode)
: HexagonGenRegisterInfo(Hexagon::R31, 0/*DwarfFlavor*/, 0/*EHFlavor*/,
0/*PC*/, HwMode) {}


bool HexagonRegisterInfo::isEHReturnCalleeSaveReg(unsigned R) const {

@ -80,11 +81,9 @@ HexagonRegisterInfo::getCallerSavedRegs(const MachineFunction *MF,
return Int64;
case PredRegsRegClassID:
return Pred;
case VectorRegsRegClassID:
case VectorRegs128BRegClassID:
case HvxVRRegClassID:
return VecSgl;
case VecDblRegsRegClassID:
case VecDblRegs128BRegClassID:
case HvxWRRegClassID:
return VecDbl;
default:
break;

@ -213,7 +212,7 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
break;
}

if (!HII.isValidOffset(Opc, RealOffset)) {
if (!HII.isValidOffset(Opc, RealOffset, this)) {
// If the offset is not valid, calculate the address in a temporary
// register and use it with offset 0.
auto &MRI = MF.getRegInfo();

@ -267,8 +266,7 @@ unsigned HexagonRegisterInfo::getHexagonSubRegIndex(
case Hexagon::CtrRegs64RegClassID:
case Hexagon::DoubleRegsRegClassID:
return ISub[GenIdx];
case Hexagon::VecDblRegsRegClassID:
case Hexagon::VecDblRegs128BRegClassID:
case Hexagon::HvxWRRegClassID:
return VSub[GenIdx];
}

@ -29,7 +29,7 @@ namespace Hexagon {

class HexagonRegisterInfo : public HexagonGenRegisterInfo {
public:
HexagonRegisterInfo();
HexagonRegisterInfo(unsigned HwMode);

/// Code Generation virtual methods...
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF)

@ -164,7 +164,6 @@ let Namespace = "Hexagon" in {
def PKTCOUNTHI: Rc<19, "pktcounthi", ["c19"]>, DwarfRegNum<[86]>;
def UTIMERLO: Rc<30, "utimerlo", ["c30"]>, DwarfRegNum<[97]>;
def UTIMERHI: Rc<31, "utimerhi", ["c31"]>, DwarfRegNum<[98]>;
}

// Control registers pairs.
let SubRegIndices = [isub_lo, isub_hi], CoveredBySubRegs = 1 in {

@ -213,6 +212,29 @@ let Namespace = "Hexagon" in {
def Q1 : Rq<1, "q1">, DwarfRegNum<[132]>;
def Q2 : Rq<2, "q2">, DwarfRegNum<[133]>;
def Q3 : Rq<3, "q3">, DwarfRegNum<[134]>;
}

// HVX types

def VecI1 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v512i1, v1024i1, v512i1]>;
def VecI8 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i8, v128i8, v64i8]>;
def VecI16 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i16, v64i16, v32i16]>;
def VecI32 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v16i32, v32i32, v16i32]>;
def VecI64 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v8i64, v16i64, v8i64]>;
def VecPI8 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v128i8, v256i8, v128i8]>;
def VecPI16 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v64i16, v128i16, v64i16]>;
def VecPI32 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v32i32, v64i32, v32i32]>;
def VecPI64 : ValueTypeByHwMode<[Hvx64, Hvx128, DefaultMode],
[v16i64, v32i64, v16i64]>;
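To make the parameterization concrete, a sketch of my own (not part of the patch; V6_vaddw serves only as an example): a pattern written once against these types resolves to v16i32 operands under Hvx64 and v32i32 under Hvx128, with no _128B duplicate needed.

def: Pat<(add (VecI32 HvxVR:$Vs), (VecI32 HvxVR:$Vt)),
(V6_vaddw HvxVR:$Vs, HvxVR:$Vt)>;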

// Register classes.
//
@ -220,55 +242,44 @@ let Namespace = "Hexagon" in {
// allocation order...
//
def IntRegs : RegisterClass<"Hexagon", [i32, f32, v4i8, v2i16], 32,
(add (sequence "R%u", 0, 9),
(sequence "R%u", 12, 28),
R10, R11, R29, R30, R31)> {
}
(add (sequence "R%u", 0, 9), (sequence "R%u", 12, 28),
R10, R11, R29, R30, R31)>;

// Registers are listed in reverse order for allocation preference reasons.
def GeneralSubRegs : RegisterClass<"Hexagon", [i32], 32,
(add R23, R22, R21, R20, R19, R18, R17,
R16, R7, R6, R5, R4, R3, R2, R1, R0)>;
(add R23, R22, R21, R20, R19, R18, R17, R16,
R7, R6, R5, R4, R3, R2, R1, R0)>;

def IntRegsLow8 : RegisterClass<"Hexagon", [i32], 32,
(add R7, R6, R5, R4, R3, R2, R1, R0)>;

def DoubleRegs : RegisterClass<"Hexagon", [i64, f64, v8i8, v4i16, v2i32], 64,
(add (sequence "D%u", 0, 4),
(sequence "D%u", 6, 13), D5, D14, D15)>;
(add (sequence "D%u", 0, 4), (sequence "D%u", 6, 13), D5, D14, D15)>;

def GeneralDoubleLow8Regs : RegisterClass<"Hexagon", [i64], 64,
(add D11, D10, D9, D8, D3, D2, D1,
D0)>;
(add D11, D10, D9, D8, D3, D2, D1, D0)>;

def VectorRegs : RegisterClass<"Hexagon", [v64i8, v32i16, v16i32, v8i64], 512,
(add (sequence "V%u", 0, 31))>;

def VecDblRegs : RegisterClass<"Hexagon",
[v128i8, v64i16, v32i32, v16i64], 1024,
(add (sequence "W%u", 0, 15))>;

def VectorRegs128B : RegisterClass<"Hexagon",
[v128i8, v64i16, v32i32, v16i64], 1024,
(add (sequence "V%u", 0, 31))>;

def VecDblRegs128B : RegisterClass<"Hexagon",
[v256i8, v128i16, v64i32, v32i64], 2048,
(add (sequence "W%u", 0, 15))>;

def VecPredRegs : RegisterClass<"Hexagon", [v512i1], 512,
(add (sequence "Q%u", 0, 3))>;

def VecPredRegs128B : RegisterClass<"Hexagon", [v1024i1], 1024,
(add (sequence "Q%u", 0, 3))>;

def PredRegs : RegisterClass<"Hexagon",
[i1, v2i1, v4i1, v8i1, v4i8, v2i16, i32], 32,
(add (sequence "P%u", 0, 3))>
{
let Size = 32;
def HvxVR : RegisterClass<"Hexagon", [VecI8, VecI16, VecI32, VecI64], 512,
(add (sequence "V%u", 0, 31))> {
let RegInfos = RegInfoByHwMode<[Hvx64, Hvx128, DefaultMode],
[RegInfo<512,512,512>, RegInfo<1024,1024,1024>, RegInfo<512,512,512>]>;
}

def HvxWR : RegisterClass<"Hexagon", [VecPI8, VecPI16, VecPI32, VecPI64], 1024,
(add (sequence "W%u", 0, 15))> {
let RegInfos = RegInfoByHwMode<[Hvx64, Hvx128, DefaultMode],
[RegInfo<1024,1024,1024>, RegInfo<2048,2048,2048>, RegInfo<1024,1024,1024>]>;
}

def HvxQR : RegisterClass<"Hexagon", [VecI1], 512, (add Q0, Q1, Q2, Q3)> {
let RegInfos = RegInfoByHwMode<[Hvx64, Hvx128, DefaultMode],
[RegInfo<512,512,512>, RegInfo<1024,1024,1024>, RegInfo<512,512,512>]>;
}
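For reference (my annotation, not in the patch): each RegInfo entry reads RegInfo<RegSize, SpillSize, SpillAlignment> in bits, so an HvxVR register is 512 bits wide in 64-byte mode and 1024 bits in 128-byte mode, with DefaultMode mirroring the 64-byte variant; this is what lets queries such as getSpillSize(Hexagon::HvxVRRegClass) return the mode-correct size.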

let Size = 32 in
def PredRegs : RegisterClass<"Hexagon",
[i1, v2i1, v4i1, v8i1, v4i8, v2i16, i32], 32, (add P0, P1, P2, P3)>;

let Size = 32 in
def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>;

@ -291,8 +302,7 @@ def CtrRegs64 : RegisterClass<"Hexagon", [i64], 64,
// The function RegisterMatchesArch() uses this list for validation.
let isAllocatable = 0 in
def V62Regs : RegisterClass<"Hexagon", [i32], 32,
(add FRAMELIMIT, FRAMEKEY, C17_16,
PKTCOUNTLO, PKTCOUNTHI, PKTCOUNT,
(add FRAMELIMIT, FRAMEKEY, C17_16, PKTCOUNTLO, PKTCOUNTHI, PKTCOUNT,
UTIMERLO, UTIMERHI, UTIMER)>;

@ -301,7 +301,8 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
StringRef FS, const TargetMachine &TM)
: HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this) {
InstrInfo(initializeSubtargetDependencies(CPU, FS)),
RegInfo(getHwMode()), TLInfo(TM, *this) {
initializeEnvironment();

// Initialize scheduling itinerary for the specified CPU.
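Presumably (an assumption on my part, not spelled out in the patch), the TableGen-generated getHwMode() maps the active subtarget features to the Hvx64 or Hvx128 mode, so constructing RegInfo with it is what selects the matching RegInfos and ValueTypeByHwMode entries defined above.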

@ -17,6 +17,7 @@
#include "HexagonFrameLowering.h"
#include "HexagonInstrInfo.h"
#include "HexagonISelLowering.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSelectionDAGInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"

@ -75,6 +76,7 @@ public:
private:
std::string CPUString;
HexagonInstrInfo InstrInfo;
HexagonRegisterInfo RegInfo;
HexagonTargetLowering TLInfo;
HexagonSelectionDAGInfo TSInfo;
HexagonFrameLowering FrameLowering;

@ -93,7 +95,7 @@ public:
}
const HexagonInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const HexagonRegisterInfo *getRegisterInfo() const override {
return &InstrInfo.getRegisterInfo();
return &RegInfo;
}
const HexagonTargetLowering *getTargetLowering() const override {
return &TLInfo;

@ -453,7 +453,7 @@ bool HexagonPacketizerList::useCallersSP(MachineInstr &MI) {
unsigned FrameSize = MF.getFrameInfo().getStackSize();
MachineOperand &Off = MI.getOperand(1);
int64_t NewOff = Off.getImm() - (FrameSize + HEXAGON_LRFP_SIZE);
if (HII->isValidOffset(Opc, NewOff)) {
if (HII->isValidOffset(Opc, NewOff, HRI)) {
Off.setImm(NewOff);
return true;
}

@ -801,7 +801,7 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,

const MCInstrDesc& MCID = PI.getDesc();
const TargetRegisterClass *VecRC = HII->getRegClass(MCID, 0, HRI, MF);
if (DisableVecDblNVStores && VecRC == &Hexagon::VecDblRegsRegClass)
if (DisableVecDblNVStores && VecRC == &Hexagon::HvxWRRegClass)
return false;

// predicate .new

@ -51,8 +51,7 @@ namespace HexagonII {
HalfWordAccess,
WordAccess,
DoubleWordAccess,
Vector64Access,
Vector128Access
HVXVectorAccess
};

// MCInstrDesc TSFlags