
[Hexagon] Updating muxir/ri/ii intrinsics. Setting predicate registers as compatible with i32 rather than doing custom type conversion.

llvm-svn: 226500
Colin LeMahieu 2015-01-19 20:31:18 +00:00
parent 51357ceef8
commit 9cc2ac99d5
4 changed files with 129 additions and 101 deletions
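For orientation, a rough IR sketch (not part of the commit; the function name and the immediate 123 are made up) of how these intrinsics present their predicate operand. The first argument of llvm.hexagon.C2.muxir is a plain i32 value; with PredRegs now also marked compatible with i32, the transfer of that value into a predicate register can be expressed by the TableGen patterns added below (T_QRI_pat and friends) instead of the hand-written operand lowering being removed from HexagonDAGToDAGISel.

; Hypothetical use of the muxir intrinsic: the predicate is passed as i32.
define i32 @muxir_sketch(i32 %p, i32 %v) {
entry:
  %r = tail call i32 @llvm.hexagon.C2.muxir(i32 %p, i32 %v, i32 123)
  ret i32 %r
}

declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)

The expected selection is a predicate/register/immediate mux, as exercised by the new test21/test23/test24 cases at the end of the diff.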


@@ -174,6 +174,9 @@ inline SDValue XformUToUM1Imm(unsigned Imm) {
// Include the pieces autogenerated from the target description.
#include "HexagonGenDAGISel.inc"
private:
bool isValueExtension(SDValue const &Val, unsigned FromBits, SDValue &Src);
};
} // end anonymous namespace
@@ -314,56 +317,6 @@ static unsigned doesIntrinsicReturnPredicate(unsigned ID)
}
}
// Intrinsics that have predicate operands.
static unsigned doesIntrinsicContainPredicate(unsigned ID)
{
switch (ID) {
default:
return 0;
case Intrinsic::hexagon_C2_tfrpr:
return Hexagon::C2_tfrpr;
case Intrinsic::hexagon_C2_and:
return Hexagon::C2_and;
case Intrinsic::hexagon_C2_xor:
return Hexagon::C2_xor;
case Intrinsic::hexagon_C2_or:
return Hexagon::C2_or;
case Intrinsic::hexagon_C2_not:
return Hexagon::C2_not;
case Intrinsic::hexagon_C2_any8:
return Hexagon::C2_any8;
case Intrinsic::hexagon_C2_all8:
return Hexagon::C2_all8;
case Intrinsic::hexagon_C2_vitpack:
return Hexagon::C2_vitpack;
case Intrinsic::hexagon_C2_mask:
return Hexagon::C2_mask;
case Intrinsic::hexagon_C2_mux:
return Hexagon::C2_mux;
// Mapping hexagon_C2_muxir to MUX_pri. This is pretty weird - but
// that's how it's mapped in q6protos.h.
case Intrinsic::hexagon_C2_muxir:
return Hexagon::C2_muxri;
// Mapping hexagon_C2_muxri to MUX_pir. This is pretty weird - but
// that's how it's mapped in q6protos.h.
case Intrinsic::hexagon_C2_muxri:
return Hexagon::C2_muxir;
case Intrinsic::hexagon_C2_muxii:
return Hexagon::C2_muxii;
case Intrinsic::hexagon_C2_vmux:
return Hexagon::C2_vmux;
case Intrinsic::hexagon_S2_valignrb:
return Hexagon::S2_valignrb;
case Intrinsic::hexagon_S2_vsplicerb:
return Hexagon::S2_vsplicerb;
}
}
static bool OffsetFitsS11(EVT MemType, int64_t Offset) {
if (MemType == MVT::i64 && isShiftedInt<11,3>(Offset)) {
return true;
@@ -1206,56 +1159,30 @@ SDNode *HexagonDAGToDAGISel::SelectZeroExtend(SDNode *N) {
return SelectCode(N);
}
//
// Checking for intrinsics which have predicate registers as operand(s)
// and lowering to the actual intrinsic.
//
SDNode *HexagonDAGToDAGISel::SelectIntrinsicWOChain(SDNode *N) {
SDLoc dl(N);
unsigned ID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
unsigned IntrinsicWithPred = doesIntrinsicContainPredicate(ID);
unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
unsigned Bits;
switch (IID) {
case Intrinsic::hexagon_S2_vsplatrb:
Bits = 8;
break;
case Intrinsic::hexagon_S2_vsplatrh:
Bits = 16;
break;
default:
return SelectCode(N);
}
// We are concerned with only those intrinsics that have predicate registers
// as at least one of the operands.
if (IntrinsicWithPred) {
SmallVector<SDValue, 8> Ops;
const HexagonInstrInfo *TII = static_cast<const HexagonInstrInfo *>(
TM.getSubtargetImpl()->getInstrInfo());
const MCInstrDesc &MCID = TII->get(IntrinsicWithPred);
const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
// Iterate over all the operands of the intrinsics.
// For PredRegs, do the transfer.
// For Double/Int Regs, just preserve the value
// For immediates, lower it.
for (unsigned i = 1; i < N->getNumOperands(); ++i) {
SDNode *Arg = N->getOperand(i).getNode();
const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI, *MF);
if (RC == &Hexagon::IntRegsRegClass ||
RC == &Hexagon::DoubleRegsRegClass) {
Ops.push_back(SDValue(Arg, 0));
} else if (RC == &Hexagon::PredRegsRegClass) {
// Do the transfer.
SDNode *PdRs = CurDAG->getMachineNode(Hexagon::C2_tfrrp, dl, MVT::i1,
SDValue(Arg, 0));
Ops.push_back(SDValue(PdRs,0));
} else if (!RC && (dyn_cast<ConstantSDNode>(Arg) != nullptr)) {
// This is immediate operand. Lower it here making sure that we DO have
// const SDNode for immediate value.
int32_t Val = cast<ConstantSDNode>(Arg)->getSExtValue();
SDValue SDVal = CurDAG->getTargetConstant(Val, MVT::i32);
Ops.push_back(SDVal);
} else {
llvm_unreachable("Unimplemented");
}
}
EVT ReturnValueVT = N->getValueType(0);
SDNode *Result = CurDAG->getMachineNode(IntrinsicWithPred, dl,
ReturnValueVT, Ops);
ReplaceUses(N, Result);
return Result;
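// vsplatrb/vsplatrh only read the low 'Bits' bits of their operand, so if the
// operand is merely an extension of a 'Bits'-wide value, rebuild the node with
// the unextended source and reselect.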
SDValue const &V = N->getOperand(1);
SDValue U;
if (isValueExtension(V, Bits, U)) {
SDValue R = CurDAG->getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
N->getOperand(0), U);
return SelectCode(R.getNode());
}
return SelectCode(N);
}
@@ -1693,3 +1620,69 @@ bool HexagonDAGToDAGISel::SelectAddrFI(SDValue& N, SDValue &R) {
R = CurDAG->getTargetFrameIndex(FX->getIndex(), MVT::i32);
return true;
}
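// Returns true if the low FromBits bits of Val are just a copy of the low
// FromBits bits of some simpler value (an extension, or an and/or/xor that
// leaves those bits of one operand untouched); if so, that value is returned
// in Src. SelectIntrinsicWOChain uses this to drop redundant extensions
// feeding vsplatrb/vsplatrh.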
bool HexagonDAGToDAGISel::isValueExtension(SDValue const &Val,
unsigned FromBits, SDValue &Src) {
unsigned Opc = Val.getOpcode();
switch (Opc) {
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND: {
SDValue const &Op0 = Val.getOperand(0);
EVT T = Op0.getValueType();
if (T.isInteger() && T.getSizeInBits() == FromBits) {
Src = Op0;
return true;
}
break;
}
case ISD::SIGN_EXTEND_INREG:
case ISD::AssertSext:
case ISD::AssertZext:
if (Val.getOperand(0).getValueType().isInteger()) {
VTSDNode *T = cast<VTSDNode>(Val.getOperand(1));
if (T->getVT().getSizeInBits() == FromBits) {
Src = Val.getOperand(0);
return true;
}
}
break;
case ISD::AND: {
// Check if this is an AND with "FromBits" of lower bits set to 1.
uint64_t FromMask = (1 << FromBits) - 1;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
if (C->getZExtValue() == FromMask) {
Src = Val.getOperand(1);
return true;
}
}
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
if (C->getZExtValue() == FromMask) {
Src = Val.getOperand(0);
return true;
}
}
break;
}
case ISD::OR:
case ISD::XOR: {
// OR/XOR with the lower "FromBits" bits set to 0.
uint64_t FromMask = (1 << FromBits) - 1;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(0))) {
if ((C->getZExtValue() & FromMask) == 0) {
Src = Val.getOperand(1);
return true;
}
}
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val.getOperand(1))) {
if ((C->getZExtValue() & FromMask) == 0) {
Src = Val.getOperand(0);
return true;
}
}
}
default:
break;
}
return false;
}


@@ -298,6 +298,11 @@ def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs),
(I32:$Rt))),
(i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
// Mux
def : T_QRI_pat<C2_muxir, int_hexagon_C2_muxir, s8ExtPred>;
def : T_QIR_pat<C2_muxri, int_hexagon_C2_muxri, s8ExtPred>;
def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s8ExtPred, s8ImmPred>;
// Shift halfword
def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
def : T_R_pat<A2_asrh, int_hexagon_A2_asrh>;
@@ -2171,12 +2176,6 @@ def HEXAGON_A2_combineii:
// ALU32 / PERM / Mux.
def HEXAGON_C2_mux:
si_ALU32_qisisi <"mux", int_hexagon_C2_mux>;
def HEXAGON_C2_muxri:
si_ALU32_qis8si <"mux", int_hexagon_C2_muxri>;
def HEXAGON_C2_muxir:
si_ALU32_qisis8 <"mux", int_hexagon_C2_muxir>;
def HEXAGON_C2_muxii:
si_ALU32_qis8s8 <"mux", int_hexagon_C2_muxii>;
// ALU32 / PERM / Shift halfword.
def HEXAGON_A2_aslh:


@@ -174,7 +174,7 @@ def DoubleRegs : RegisterClass<"Hexagon", [i64,f64], 64,
(sequence "D%u", 6, 13), D5, D14, D15)>;
def PredRegs : RegisterClass<"Hexagon", [i1], 32, (add (sequence "P%u", 0, 3))>
def PredRegs : RegisterClass<"Hexagon", [i1, i32], 32, (add (sequence "P%u", 0, 3))>
{
let Size = 32;
}


@@ -127,6 +127,39 @@ entry:
ret void
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}},{{ *}}##71230)
define void @test21(i32 %a) #0 {
entry:
%0 = load i8* @b, align 1
%conv = zext i8 %0 to i32
%1 = tail call i32 @llvm.hexagon.C2.muxir(i32 %conv, i32 %a, i32 71230)
store i32 %1, i32* @d, align 4
ret void
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}##5000{{ *}},{{ *}}r{{[0-9]+}})
define void @test23(i32 %a) #0 {
entry:
%0 = load i8* @b, align 1
%conv = zext i8 %0 to i32
%1 = tail call i32 @llvm.hexagon.C2.muxri(i32 %conv, i32 5000, i32 %a)
store i32 %1, i32* @d, align 4
ret void
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}##-4900{{ *}},{{ *}}#94)
define void @test24(i32 %a) #0 {
entry:
%0 = load i8* @b, align 1
%conv = zext i8 %0 to i32
%1 = tail call i32 @llvm.hexagon.C2.muxii(i32 %conv, i32 -4900, i32 94)
store i32 %1, i32* @d, align 4
ret void
}
; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(##-1280{{ *}},{{ *}}#120)
define void @test25(i32 %a) #0 {
@@ -148,4 +181,7 @@ declare i32 @llvm.hexagon.A2.orir(i32, i32) #1
declare i32 @llvm.hexagon.A2.subri(i32, i32)
declare i32 @llvm.hexagon.A2.tfril(i32, i32) #1
declare i32 @llvm.hexagon.A2.tfrih(i32, i32) #1
declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32) #1
declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32) #1
declare i32 @llvm.hexagon.C2.muxii(i32, i32, i32) #1
declare i64 @llvm.hexagon.A2.combineii(i32, i32) #1