
[ARM] Add some sample IR MVE intrinsics with C++ isel.

This adds some initial example IR intrinsics for MVE instructions that
deliver multiple output values, and hence have to be instruction-selected
by custom C++ code instead of Tablegen patterns.

I've added the writeback gather load instructions (taking a vector of
base addresses and a single common offset, returning a vector of
loaded values and an updated vector of base addresses); one example
from the long shift family (taking and returning a 64-bit value in two
GPRs); and the VADC instruction (which propagates a carry bit from
each vector-lane addition to the next, taking an input carry flag in
FPSCR and outputting the final one in FPSCR as well).
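To sketch the multiple-result shape at the IR level (value names here are
illustrative only; the full autogenerated tests are in the diff below),
the writeback gather load comes back as an aggregate that gets taken
apart with extractvalue:

  %pair = call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %bases, i32 80)
  %loaded  = extractvalue { <4 x i32>, <4 x i32> } %pair, 0
  %newbase = extractvalue { <4 x i32>, <4 x i32> } %pair, 1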

To support the VPT-predicated forms of these instructions, I've
written some helper functions to add the cluster of MVE predicate
operands to the end of a MachineInstr. `AddMVEPredicateToOps` is used
when the instruction actually is predicated (so it takes a predicate
mask argument), and `AddEmptyMVEPredicateToOps` is for when the
instruction is unpredicated (so it fills in $noreg for the mask). Each
one comes in a form suitable for `vpred_n`, and one for `vpred_r`
which takes the extra 'inactive' parameter.
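The difference is visible in the intrinsic signatures themselves: a
`vpred_r`-style intrinsic carries an explicit 'inactive' operand, a
`vpred_n`-style one doesn't. Compare these two predicated declarations
from the tests below, where the vadc form's leading <4 x i32> supplies
the inactive lanes:

  declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64>, i32, <4 x i1>)
  declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)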

For VADC, the representation of the carry flag in the IR intrinsic is
a word intended to be moved directly to and from `FPSCR_nzcvqc`, i.e.
with the carry flag in bit 29 of the word. (The user-facing ACLE
intrinsic will want it to be in bit 0, but I'll do that on the clang
side.)
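Concretely, marshalling a 0/1 carry value to and from that format looks
like this in IR (a sketch mirroring the vadc test below):

  %fpscr_in  = shl i32 %carry, 29       ; carry up to bit 29
  %pair      = call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %fpscr_in)
  %fpscr_out = extractvalue { <4 x i32>, i32 } %pair, 1
  %bit       = lshr i32 %fpscr_out, 29
  %carry_out = and i32 %bit, 1          ; back down to bit 0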

Reviewers: dmgreen, miyuki, ostannard

Subscribers: kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D68699
Author: Simon Tatham
Date: 2019-10-07 17:05:48 +01:00
Commit: 41354b8e32 (parent: f9bcd34532)

5 changed files with 375 additions and 0 deletions


@@ -817,4 +817,23 @@ def int_arm_mve_vcvt_narrow: Intrinsic<[llvm_v8f16_ty],
def int_arm_mve_vcvt_narrow_predicated: Intrinsic<[llvm_v8f16_ty],
   [llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4i1_ty], [IntrNoMem]>;

def int_arm_mve_vldr_gather_base_wb: Intrinsic<
  [llvm_anyvector_ty, llvm_anyvector_ty],
  [LLVMMatchType<1>, llvm_i32_ty], [IntrReadMem]>;
def int_arm_mve_vldr_gather_base_wb_predicated: Intrinsic<
  [llvm_anyvector_ty, llvm_anyvector_ty],
  [LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty], [IntrReadMem]>;

def int_arm_mve_urshrl: Intrinsic<
  [llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem]>;

def int_arm_mve_vadc: Intrinsic<
  [llvm_anyvector_ty, llvm_i32_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vadc_predicated: Intrinsic<
  [llvm_anyvector_ty, llvm_i32_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
   llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;

} // end TargetPrefix


@@ -209,6 +209,29 @@ private:
                       unsigned NumVecs, const uint16_t *DOpcodes,
                       const uint16_t *QOpcodes);

  /// Helper functions for setting up clusters of MVE predication operands.
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask);
  template <typename SDValueVector>
  void AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                            SDValue PredicateMask, SDValue Inactive);

  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc);
  template <typename SDValueVector>
  void AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                                 EVT InactiveTy);

  /// SelectMVE_WB - Select MVE writeback load/store intrinsics.
  void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated);

  /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics.
  void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate);

  /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics.
  void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                         uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 1, 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers.
@@ -2304,6 +2327,128 @@ void ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, bool isUpdating,
  CurDAG->RemoveDeadNode(N);
}

template <typename SDValueVector>
void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                                           SDValue PredicateMask) {
  Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
  Ops.push_back(PredicateMask);
}

template <typename SDValueVector>
void ARMDAGToDAGISel::AddMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                                           SDValue PredicateMask,
                                           SDValue Inactive) {
  Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32));
  Ops.push_back(PredicateMask);
  Ops.push_back(Inactive);
}

template <typename SDValueVector>
void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc) {
  Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
  Ops.push_back(CurDAG->getRegister(0, MVT::i32));
}

template <typename SDValueVector>
void ARMDAGToDAGISel::AddEmptyMVEPredicateToOps(SDValueVector &Ops, SDLoc Loc,
                                                EVT InactiveTy) {
  Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32));
  Ops.push_back(CurDAG->getRegister(0, MVT::i32));
  Ops.push_back(SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0));
}

void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes,
                                   bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  uint16_t Opcode;
  switch (N->getValueType(1).getVectorElementType().getSizeInBits()) {
  case 32:
    Opcode = Opcodes[0];
    break;
  case 64:
    Opcode = Opcodes[1];
    break;
  default:
    llvm_unreachable("bad vector element size in SelectMVE_WB");
  }

  Ops.push_back(N->getOperand(2)); // vector of base addresses
  int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
  Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc, N->getOperand(4));
  else
    AddEmptyMVEPredicateToOps(Ops, Loc);

  Ops.push_back(N->getOperand(0)); // chain

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}

void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode,
                                          bool Immediate) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  // Two 32-bit halves of the value to be shifted
  Ops.push_back(N->getOperand(1));
  Ops.push_back(N->getOperand(2));

  // The shift count
  if (Immediate) {
    int32_t ImmValue = cast<ConstantSDNode>(N->getOperand(3))->getZExtValue();
    Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate shift count
  } else {
    Ops.push_back(N->getOperand(3));
  }

  // MVE scalar shifts are IT-predicable, so include the standard
  // predicate arguments.
  Ops.push_back(getAL(CurDAG, Loc));
  Ops.push_back(CurDAG->getRegister(0, MVT::i32));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}

void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                                        uint16_t OpcodeWithNoCarry,
                                        bool Add, bool Predicated) {
  SDLoc Loc(N);
  SmallVector<SDValue, 8> Ops;

  uint16_t Opcode;
  unsigned FirstInputOp = Predicated ? 2 : 1;

  // Two input vectors and the input carry flag
  Ops.push_back(N->getOperand(FirstInputOp));
  Ops.push_back(N->getOperand(FirstInputOp + 1));
  SDValue CarryIn = N->getOperand(FirstInputOp + 2);
  ConstantSDNode *CarryInConstant = dyn_cast<ConstantSDNode>(CarryIn);
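  // If the carry-in is a compile-time constant whose bit 29 (the FPSCR.C
  // position) already matches the fixed initial carry of the I-suffixed
  // instruction (0 for VADCI, 1 for VSBCI), use that form and drop the
  // carry operand entirely.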
  uint32_t CarryMask = 1 << 29;
  uint32_t CarryExpected = Add ? 0 : CarryMask;
  if (CarryInConstant &&
      (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) {
    Opcode = OpcodeWithNoCarry;
  } else {
    Ops.push_back(CarryIn);
    Opcode = OpcodeWithCarry;
  }

  if (Predicated)
    AddMVEPredicateToOps(Ops, Loc,
                         N->getOperand(FirstInputOp + 3),  // predicate
                         N->getOperand(FirstInputOp - 1)); // inactive
  else
    AddEmptyMVEPredicateToOps(Ops, Loc, N->getValueType(0));

  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
}

void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic,
                                   bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
@@ -4028,6 +4173,34 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
      SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
      return;
    }

    case Intrinsic::arm_mve_vldr_gather_base_wb:
    case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
      static const uint16_t Opcodes[] = {ARM::MVE_VLDRWU32_qi_pre,
                                         ARM::MVE_VLDRDU64_qi_pre};
      SelectMVE_WB(N, Opcodes,
                   IntNo == Intrinsic::arm_mve_vldr_gather_base_wb_predicated);
      return;
    }
    }
    break;
  }

  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;

    case Intrinsic::arm_mve_urshrl:
      SelectMVE_LongShift(N, ARM::MVE_URSHRL, true);
      return;

    case Intrinsic::arm_mve_vadc:
    case Intrinsic::arm_mve_vadc_predicated:
      SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
                        IntNo == Intrinsic::arm_mve_vadc_predicated);
      return;
    }
    break;
  }


@@ -0,0 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc i64 @test_urshrl(i64 %value) {
; CHECK-LABEL: test_urshrl:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    urshrl r0, r1, #6
; CHECK-NEXT:    bx lr
entry:
  %0 = lshr i64 %value, 32
  %1 = trunc i64 %0 to i32
  %2 = trunc i64 %value to i32
  %3 = tail call { i32, i32 } @llvm.arm.mve.urshrl(i32 %2, i32 %1, i32 6)
  %4 = extractvalue { i32, i32 } %3, 1
  %5 = zext i32 %4 to i64
  %6 = shl nuw i64 %5, 32
  %7 = extractvalue { i32, i32 } %3, 0
  %8 = zext i32 %7 to i64
  %9 = or i64 %6, %8
  ret i64 %9
}

declare { i32, i32 } @llvm.arm.mve.urshrl(i32, i32, i32)


@@ -0,0 +1,98 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc <4 x i32> @test_vadciq_s32(<4 x i32> %a, <4 x i32> %b, i32* %carry_out) {
; CHECK-LABEL: test_vadciq_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vadci.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0)
  %1 = extractvalue { <4 x i32>, i32 } %0, 1
  %2 = lshr i32 %1, 29
  %3 = and i32 %2, 1
  store i32 %3, i32* %carry_out, align 4
  %4 = extractvalue { <4 x i32>, i32 } %0, 0
  ret <4 x i32> %4
}

declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32>, <4 x i32>, i32)

define arm_aapcs_vfpcc <4 x i32> @test_vadcq_u32(<4 x i32> %a, <4 x i32> %b, i32* %carry) {
; CHECK-LABEL: test_vadcq_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    lsls r1, r1, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vadc.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

define arm_aapcs_vfpcc <4 x i32> @test_vadciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry_out, i16 zeroext %p) {
; CHECK-LABEL: test_vadciq_m_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vadcit.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry_out, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)

define arm_aapcs_vfpcc <4 x i32> @test_vadcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry, i16 zeroext %p) {
; CHECK-LABEL: test_vadcq_m_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r2, [r0]
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    lsls r1, r2, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vadct.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = zext i16 %p to i32
  %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
  %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3)
  %5 = extractvalue { <4 x i32>, i32 } %4, 1
  %6 = lshr i32 %5, 29
  %7 = and i32 %6, 1
  store i32 %7, i32* %carry, align 4
  %8 = extractvalue { <4 x i32>, i32 } %4, 0
  ret <4 x i32> %8
}


@@ -0,0 +1,62 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %addr) {
; CHECK-LABEL: test_vldrwq_gather_base_wb_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    vldrw.u32 q1, [q0, #80]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
  %1 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 80)
  %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1
  store <4 x i32> %2, <4 x i32>* %addr, align 8
  %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0
  ret <4 x i32> %3
}

declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32)

define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %addr) {
; CHECK-LABEL: test_vldrwq_gather_base_wb_f32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    vldrw.u32 q1, [q0, #64]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <4 x i32>, <4 x i32>* %addr, align 8
  %1 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %0, i32 64)
  %2 = extractvalue { <4 x float>, <4 x i32> } %1, 1
  store <4 x i32> %2, <4 x i32>* %addr, align 8
  %3 = extractvalue { <4 x float>, <4 x i32> } %1, 0
  ret <4 x float> %3
}

declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32)

define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %addr, i16 zeroext %p) {
; CHECK-LABEL: test_vldrdq_gather_base_wb_z_u64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vldrdt.u64 q1, [q0, #656]!
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load <2 x i64>, <2 x i64>* %addr, align 8
  %1 = zext i16 %p to i32
  %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1)
  %3 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64> %0, i32 656, <4 x i1> %2)
  %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1
  store <2 x i64> %4, <2 x i64>* %addr, align 8
  %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0
  ret <2 x i64> %5
}

declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64>, i32, <4 x i1>)