
[Hexagon] Add support to handle bit-reverse load intrinsics

Patch by Sumanth Gundapaneni.

llvm-svn: 328774
Krzysztof Parzyszek 2018-03-29 13:52:46 +00:00
parent 13e9d4c9ef
commit e739d778c4
7 changed files with 239 additions and 203 deletions
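
For orientation before the diff, a hedged sketch of the IR-level signature change this patch makes; both declarations are taken from the test updates further down, and it is the new form that the selection code added here handles.

  ; Legacy form (removed): the loaded value is written through the second pointer operand
  declare i8* @llvm.hexagon.brev.ldw(i8*, i8*, i32)
  ; New form (added): returns { loaded value, post-updated base pointer }
  declare { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8*, i32)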

@@ -643,41 +643,6 @@ class Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
// This one below will not be auto-generated,
// so make sure you don't overwrite this one.
//
// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
//
def int_hexagon_SI_to_SXTHI_asrh :
Hexagon_si_si_Intrinsic<"SI_to_SXTHI_asrh">;
//
// BUILTIN_INFO_NONCONST(brev_ldd,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_ldd :
Hexagon_mem_memmemsi_Intrinsic<"brev_ldd">;
//
// BUILTIN_INFO_NONCONST(brev_ldw,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_ldw :
Hexagon_mem_memmemsi_Intrinsic<"brev_ldw">;
//
// BUILTIN_INFO_NONCONST(brev_ldh,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_ldh :
Hexagon_mem_memmemsi_Intrinsic<"brev_ldh">;
//
// BUILTIN_INFO_NONCONST(brev_lduh,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_lduh :
Hexagon_mem_memmemsi_Intrinsic<"brev_lduh">;
//
// BUILTIN_INFO_NONCONST(brev_ldb,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_ldb :
Hexagon_mem_memmemsi_Intrinsic<"brev_ldb">;
//
// BUILTIN_INFO_NONCONST(brev_ldub,PTR_ftype_PTRPTRSI,3)
//
def int_hexagon_brev_ldub :
Hexagon_mem_memmemsi_Intrinsic<"brev_ldub">;
//
// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldd :
@@ -708,31 +673,6 @@ Hexagon_mem_memmemsisi_Intrinsic<"circ_ldb">;
def int_hexagon_circ_ldub :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldub">;
//
// BUILTIN_INFO_NONCONST(brev_stb,PTR_ftype_PTRSISI,3)
//
def int_hexagon_brev_stb :
Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
//
// BUILTIN_INFO_NONCONST(brev_sthhi,PTR_ftype_PTRSISI,3)
//
def int_hexagon_brev_sthhi :
Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
//
// BUILTIN_INFO_NONCONST(brev_sth,PTR_ftype_PTRSISI,3)
//
def int_hexagon_brev_sth :
Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
//
// BUILTIN_INFO_NONCONST(brev_stw,PTR_ftype_PTRSISI,3)
//
def int_hexagon_brev_stw :
Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
//
// BUILTIN_INFO_NONCONST(brev_std,PTR_ftype_PTRSISI,3)
//
def int_hexagon_brev_std :
Hexagon_mem_memdisi_Intrinsic<"brev_std">;
//
// BUILTIN_INFO_NONCONST(circ_std,PTR_ftype_PTRDISISI,4)
//
@@ -9340,6 +9280,27 @@ defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;
// The front-end emits the intrinsic call with only two arguments. The third
// argument of the builtin is already used by the front-end to write the loaded
// value to memory by generating a separate store (see the IR sketch after
// these definitions).
class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
: Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrReadMem]>;
def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
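
A minimal sketch of the two-argument form described in the comment above, assuming a doubleword load; the store of the loaded value is generated separately by the front-end, so the intrinsic no longer needs the destination pointer (%base, %dst and %mod are illustrative names).

  %r    = call { i64, i8* } @llvm.hexagon.L2.loadrd.pbr(i8* %base, i32 %mod)
  %val  = extractvalue { i64, i8* } %r, 0
  store i64 %val, i64* %dst                 ; the builtin's third argument ends up here
  %next = extractvalue { i64, i8* } %r, 1   ; post-updated base pointer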
///
/// HexagonV62 intrinsics

@@ -210,26 +210,6 @@ MachineSDNode *HexagonDAGToDAGISel::LoadInstrForLoadIntrinsic(SDNode *IntN) {
return Res;
}
static std::map<unsigned,unsigned> LoadPbrMap = {
{ Intrinsic::hexagon_brev_ldb, Hexagon::L2_loadrb_pbr },
{ Intrinsic::hexagon_brev_ldub, Hexagon::L2_loadrub_pbr },
{ Intrinsic::hexagon_brev_ldh, Hexagon::L2_loadrh_pbr },
{ Intrinsic::hexagon_brev_lduh, Hexagon::L2_loadruh_pbr },
{ Intrinsic::hexagon_brev_ldw, Hexagon::L2_loadri_pbr },
{ Intrinsic::hexagon_brev_ldd, Hexagon::L2_loadrd_pbr },
};
auto FLB = LoadPbrMap.find(IntNo);
if (FLB != LoadPbrMap.end()) {
SDNode *Mod = CurDAG->getMachineNode(Hexagon::A2_tfrrcr, dl, MVT::i32,
IntN->getOperand(4));
EVT ValTy = (IntNo == Intrinsic::hexagon_brev_ldd) ? MVT::i64 : MVT::i32;
EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
// Operands: { Base, Modifier, Chain }
MachineSDNode *Res = CurDAG->getMachineNode(FLB->second, dl, RTys,
{ IntN->getOperand(2), SDValue(Mod,0), IntN->getOperand(0) });
return Res;
}
return nullptr;
}
@@ -300,14 +280,10 @@ bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
// a sign-extending intrinsic into (or the other way around).
ISD::LoadExtType IntExt;
switch (cast<ConstantSDNode>(C->getOperand(1))->getZExtValue()) {
case Intrinsic::hexagon_brev_ldub:
case Intrinsic::hexagon_brev_lduh:
case Intrinsic::hexagon_circ_ldub:
case Intrinsic::hexagon_circ_lduh:
IntExt = ISD::ZEXTLOAD;
break;
case Intrinsic::hexagon_brev_ldw:
case Intrinsic::hexagon_brev_ldd:
case Intrinsic::hexagon_circ_ldw:
case Intrinsic::hexagon_circ_ldd:
IntExt = ISD::NON_EXTLOAD;
@@ -338,6 +314,47 @@ bool HexagonDAGToDAGISel::tryLoadOfLoadIntrinsic(LoadSDNode *N) {
return false;
}
// Convert the bit-reverse load intrinsic to the appropriate target instruction.
bool HexagonDAGToDAGISel::SelectBrevLdIntrinsic(SDNode *IntN) {
if (IntN->getOpcode() != ISD::INTRINSIC_W_CHAIN)
return false;
const SDLoc &dl(IntN);
unsigned IntNo = cast<ConstantSDNode>(IntN->getOperand(1))->getZExtValue();
static const std::map<unsigned, unsigned> LoadBrevMap = {
{ Intrinsic::hexagon_L2_loadrb_pbr, Hexagon::L2_loadrb_pbr },
{ Intrinsic::hexagon_L2_loadrub_pbr, Hexagon::L2_loadrub_pbr },
{ Intrinsic::hexagon_L2_loadrh_pbr, Hexagon::L2_loadrh_pbr },
{ Intrinsic::hexagon_L2_loadruh_pbr, Hexagon::L2_loadruh_pbr },
{ Intrinsic::hexagon_L2_loadri_pbr, Hexagon::L2_loadri_pbr },
{ Intrinsic::hexagon_L2_loadrd_pbr, Hexagon::L2_loadrd_pbr }
};
auto FLI = LoadBrevMap.find(IntNo);
if (FLI != LoadBrevMap.end()) {
EVT ValTy =
(IntNo == Intrinsic::hexagon_L2_loadrd_pbr) ? MVT::i64 : MVT::i32;
EVT RTys[] = { ValTy, MVT::i32, MVT::Other };
// Operands of Intrinsic: {chain, enum ID of intrinsic, baseptr,
// modifier}.
// Operands of target instruction: { Base, Modifier, Chain }.
MachineSDNode *Res = CurDAG->getMachineNode(
FLI->second, dl, RTys,
{IntN->getOperand(2), IntN->getOperand(3), IntN->getOperand(0)});
MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
MemOp[0] = cast<MemIntrinsicSDNode>(IntN)->getMemOperand();
Res->setMemRefs(MemOp, MemOp + 1);
ReplaceUses(SDValue(IntN, 0), SDValue(Res, 0));
ReplaceUses(SDValue(IntN, 1), SDValue(Res, 1));
ReplaceUses(SDValue(IntN, 2), SDValue(Res, 2));
CurDAG->RemoveDeadNode(IntN);
return true;
}
return false;
}
/// Generate a machine instruction node for the new circular buffer intrinsics.
/// The new versions use a CSx register instead of the K field.
bool HexagonDAGToDAGISel::SelectNewCircIntrinsic(SDNode *IntN) {
@@ -612,6 +629,10 @@ void HexagonDAGToDAGISel::SelectIntrinsicWChain(SDNode *N) {
return;
}
// Handle bit-reverse load intrinsics.
if (SelectBrevLdIntrinsic(N))
return;
if (SelectNewCircIntrinsic(N))
return;

@@ -90,6 +90,7 @@ public:
unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
bool tryLoadOfLoadIntrinsic(LoadSDNode *N);
bool SelectBrevLdIntrinsic(SDNode *IntN);
bool SelectNewCircIntrinsic(SDNode *IntN);
void SelectLoad(SDNode *N);
void SelectIndexedLoad(LoadSDNode *LD, const SDLoc &dl);

@@ -39,6 +39,7 @@
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@@ -1744,6 +1745,81 @@ const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
return nullptr;
}
// Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
// intrinsic.
static bool isBrevLdIntrinsic(const Value *Inst) {
unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID();
return (ID == Intrinsic::hexagon_L2_loadrd_pbr ||
ID == Intrinsic::hexagon_L2_loadri_pbr ||
ID == Intrinsic::hexagon_L2_loadrh_pbr ||
ID == Intrinsic::hexagon_L2_loadruh_pbr ||
ID == Intrinsic::hexagon_L2_loadrb_pbr ||
ID == Intrinsic::hexagon_L2_loadrub_pbr);
}
// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
// previous instruction. So far we only handle bitcast, extractvalue and
// bit-reverse load intrinsic instructions. Should we handle CGEP?
static Value *getBrevLdObject(Value *V) {
if (Operator::getOpcode(V) == Instruction::ExtractValue ||
Operator::getOpcode(V) == Instruction::BitCast)
V = cast<Operator>(V)->getOperand(0);
else if (isa<IntrinsicInst>(V) && isBrevLdIntrinsic(V))
V = cast<Instruction>(V)->getOperand(0);
return V;
}
// Bit-reverse Load Intrinsic: For a PHI node, return the value from either the
// incoming edge or the back edge. If the back-edge value comes from the
// intrinsic itself, the incoming value is returned.
static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {
const BasicBlock *Parent = PN->getParent();
int Idx = -1;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
BasicBlock *Blk = PN->getIncomingBlock(i);
// Determine whether the back edge originates from the intrinsic.
if (Blk == Parent) {
Value *BackEdgeVal = PN->getIncomingValue(i);
Value *BaseVal;
// Loop until getBrevLdObject returns the same Value or we hit IntrBaseVal.
do {
BaseVal = BackEdgeVal;
BackEdgeVal = getBrevLdObject(BackEdgeVal);
} while ((BaseVal != BackEdgeVal) && (IntrBaseVal != BackEdgeVal));
// If getBrevLdObject returns IntrBaseVal, we should return the
// incoming edge instead.
if (IntrBaseVal == BackEdgeVal)
continue;
Idx = i;
break;
} else // Set the node to incoming edge.
Idx = i;
}
assert(Idx >= 0 && "Unexpected index to incoming argument in PHI");
return PN->getIncomingValue(Idx);
}
// Bit-reverse Load Intrinsic: Figure out the underlying object the base
// pointer points to. Setting this on the memoperand might help alias
// analysis figure out the dependencies.
static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {
Value *IntrBaseVal = V;
Value *BaseVal;
// Loop until getBrevLdObject returns the same Value; at that point we have
// either found the object or hit a PHI.
do {
BaseVal = V;
V = getBrevLdObject(V);
} while (BaseVal != V);
// Identify the object from PHINode.
if (const PHINode *PN = dyn_cast<PHINode>(V))
return returnEdge(PN, IntrBaseVal);
// For non-PHI nodes, the object is the last value returned by getBrevLdObject.
else
return V;
}
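
A hypothetical loop illustrating the shape the two helpers above are written for: the base pointer is a PHI of the original buffer and the post-updated pointer extracted from the previous bit-reverse load. Crawling the back-edge value through the extractvalue and the intrinsic leads back to the PHI itself, so returnEdge falls back to the incoming value (%buf), which then becomes the pointer recorded on the memoperand. All names (@sum, %buf, %mod, %n) are illustrative.

  define i32 @sum(i8* %buf, i32 %mod, i32 %n) {
  entry:
    br label %loop
  loop:
    %p    = phi i8* [ %buf, %entry ], [ %next, %loop ]  ; back-edge value comes from the intrinsic
    %i    = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    %acc  = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
    %r    = call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %p, i32 %mod)
    %val  = extractvalue { i32, i8* } %r, 0
    %next = extractvalue { i32, i8* } %r, 1
    %acc.next = add i32 %acc, %val
    %i.next   = add i32 %i, 1
    %cond = icmp ult i32 %i.next, %n
    br i1 %cond, label %loop, label %exit
  exit:
    ret i32 %acc.next
  }
  declare { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8*, i32)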
/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and store the intrinsic information into the IntrinsicInfo that was
@@ -1753,6 +1829,32 @@ bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
MachineFunction &MF,
unsigned Intrinsic) const {
switch (Intrinsic) {
case Intrinsic::hexagon_L2_loadrd_pbr:
case Intrinsic::hexagon_L2_loadri_pbr:
case Intrinsic::hexagon_L2_loadrh_pbr:
case Intrinsic::hexagon_L2_loadruh_pbr:
case Intrinsic::hexagon_L2_loadrb_pbr:
case Intrinsic::hexagon_L2_loadrub_pbr: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
auto &Cont = I.getCalledFunction()->getParent()->getContext();
// The intrinsic function call is of the form { ElTy, i8* }
// @llvm.hexagon.L2.loadXX.pbr(i8*, i32). The pointer and memory access type
// should be derived from ElTy.
PointerType *PtrTy = I.getCalledFunction()
->getReturnType()
->getContainedType(0)
->getPointerTo();
Info.memVT = MVT::getVT(PtrTy->getElementType());
llvm::Value *BasePtrVal = I.getOperand(0);
Info.ptrVal = getUnderLyingObjectForBrevLdIntr(BasePtrVal);
// The offset value comes through the modifier register. For now, assume the
// offset is 0.
Info.offset = 0;
Info.align = DL.getABITypeAlignment(Info.memVT.getTypeForEVT(Cont));
Info.flags = MachineMemOperand::MOLoad;
return true;
}
case Intrinsic::hexagon_V6_vgathermw:
case Intrinsic::hexagon_V6_vgathermw_128B:
case Intrinsic::hexagon_V6_vgathermh:

@@ -807,7 +807,6 @@ def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s32_0ImmPred, s8_0ImmPred>;
// Shift halfword
def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
def : T_R_pat<A2_asrh, int_hexagon_A2_asrh>;
def : T_R_pat<A2_asrh, int_hexagon_SI_to_SXTHI_asrh>;
// Sign/zero extend
def : T_R_pat<A2_sxth, int_hexagon_A2_sxth>;
@@ -1353,11 +1352,11 @@ class T_stb_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Val>
: Pat<(IntID I32:$Rs, Val:$Rt, I32:$Ru),
(MI I32:$Rs, I32:$Ru, Val:$Rt)>;
def : T_stb_pat <S2_storerh_pbr, int_hexagon_brev_sth, I32>;
def : T_stb_pat <S2_storerb_pbr, int_hexagon_brev_stb, I32>;
def : T_stb_pat <S2_storeri_pbr, int_hexagon_brev_stw, I32>;
def : T_stb_pat <S2_storerf_pbr, int_hexagon_brev_sthhi, I32>;
def : T_stb_pat <S2_storerd_pbr, int_hexagon_brev_std, I64>;
def : T_stb_pat <S2_storerh_pbr, int_hexagon_S2_storerh_pbr, I32>;
def : T_stb_pat <S2_storerb_pbr, int_hexagon_S2_storerb_pbr, I32>;
def : T_stb_pat <S2_storeri_pbr, int_hexagon_S2_storeri_pbr, I32>;
def : T_stb_pat <S2_storerf_pbr, int_hexagon_S2_storerf_pbr, I32>;
def : T_stb_pat <S2_storerd_pbr, int_hexagon_S2_storerd_pbr, I64>;
class T_stc_pat <InstHexagon MI, Intrinsic IntID, PatLeaf Imm, PatLeaf Val>
: Pat<(IntID I32:$Rs, Val:$Rt, I32:$Ru, Imm:$s),

@@ -15,126 +15,78 @@
; r1 = memub(r0++m0:brev)
; r1 = memb(r0++m0:brev)
target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
target triple = "hexagon"
target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
target triple = "hexagon-unknown--elf"
define i64 @foo(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
; CHECK: @call_brev_ldd
define i64* @call_brev_ldd(i64* %ptr, i64 %dst, i32 %mod) local_unnamed_addr #0 {
entry:
%inputLR = alloca i64, align 8
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i64* %inputLR to i8*
%sub = sub i32 13, %shr1
%shl = shl i32 1, %sub
%0 = bitcast i64* %ptr to i8*
; CHECK: = memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldd(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i64*
%4 = load i64, i64* %3, align 8, !tbaa !0
ret i64 %4
%1 = tail call { i64, i8* } @llvm.hexagon.L2.loadrd.pbr(i8* %0, i32 %mod)
%2 = extractvalue { i64, i8* } %1, 1
%3 = bitcast i8* %2 to i64*
ret i64* %3
}
declare i8* @llvm.hexagon.brev.ldd(i8*, i8*, i32) nounwind
define i32 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
; CHECK: @call_brev_ldw
define i32* @call_brev_ldw(i32* %ptr, i32 %dst, i32 %mod) local_unnamed_addr #0 {
entry:
%inputLR = alloca i32, align 4
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i32* %inputLR to i8*
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
%0 = bitcast i32* %ptr to i8*
; CHECK: = memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%2 = call i8* @llvm.hexagon.brev.ldw(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i32*
%4 = load i32, i32* %3, align 4, !tbaa !2
ret i32 %4
%1 = tail call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %0, i32 %mod)
%2 = extractvalue { i32, i8* } %1, 1
%3 = bitcast i8* %2 to i32*
ret i32* %3
}
declare i8* @llvm.hexagon.brev.ldw(i8*, i8*, i32) nounwind
define signext i16 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
; CHECK: @call_brev_ldh
define i16* @call_brev_ldh(i16* %ptr, i16 signext %dst, i32 %mod) local_unnamed_addr #0 {
entry:
%inputLR = alloca i16, align 2
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
; CHECK: = memh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.ldh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
ret i16 %4
%0 = bitcast i16* %ptr to i8*
; CHECK: = memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call { i32, i8* } @llvm.hexagon.L2.loadrh.pbr(i8* %0, i32 %mod)
%2 = extractvalue { i32, i8* } %1, 1
%3 = bitcast i8* %2 to i16*
ret i16* %3
}
declare i8* @llvm.hexagon.brev.ldh(i8*, i8*, i32) nounwind
define zeroext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
; CHECK: @call_brev_lduh
define i16* @call_brev_lduh(i16* %ptr, i16 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
entry:
%inputLR = alloca i16, align 2
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%1 = bitcast i16* %inputLR to i8*
%sub = sub i32 15, %shr1
%shl = shl i32 1, %sub
; CHECK: = memuh(r{{[0-9]*}}++m0:brev)
%2 = call i8* @llvm.hexagon.brev.lduh(i8* %0, i8* %1, i32 %shl)
%3 = bitcast i8* %1 to i16*
%4 = load i16, i16* %3, align 2, !tbaa !3
ret i16 %4
%0 = bitcast i16* %ptr to i8*
; CHECK: = memuh(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call { i32, i8* } @llvm.hexagon.L2.loadruh.pbr(i8* %0, i32 %mod)
%2 = extractvalue { i32, i8* } %1, 1
%3 = bitcast i8* %2 to i16*
ret i16* %3
}
declare i8* @llvm.hexagon.brev.lduh(i8*, i8*, i32) nounwind
define zeroext i8 @foo4(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
; CHECK: @call_brev_ldb
define i8* @call_brev_ldb(i8* %ptr, i8 signext %dst, i32 %mod) local_unnamed_addr #0 {
entry:
%inputLR = alloca i8, align 1
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldub(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
}
declare i8* @llvm.hexagon.brev.ldub(i8*, i8*, i32) nounwind
define signext i8 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
entry:
%inputLR = alloca i8, align 1
%conv = zext i16 %filtMemLen to i32
%shr1 = lshr i32 %conv, 1
%idxprom = sext i16 %filtMemIndex to i32
%arrayidx = getelementptr inbounds i16, i16* %filtMemLR, i32 %idxprom
%0 = bitcast i16* %arrayidx to i8*
%sub = sub nsw i32 16, %shr1
%shl = shl i32 1, %sub
; CHECK: = memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = call i8* @llvm.hexagon.brev.ldb(i8* %0, i8* %inputLR, i32 %shl)
%2 = load i8, i8* %inputLR, align 1, !tbaa !0
ret i8 %2
%0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrb.pbr(i8* %ptr, i32 %mod)
%1 = extractvalue { i32, i8* } %0, 1
ret i8* %1
}
declare i8* @llvm.hexagon.brev.ldb(i8*, i8*, i32) nounwind
; Function Attrs: nounwind readonly
; CHECK: @call_brev_ldub
define i8* @call_brev_ldub(i8* %ptr, i8 zeroext %dst, i32 %mod) local_unnamed_addr #0 {
entry:
; CHECK: = memub(r{{[0-9]*}}++m{{[0-1]}}:brev)
%0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pbr(i8* %ptr, i32 %mod)
%1 = extractvalue { i32, i8* } %0, 1
ret i8* %1
}
!0 = !{!"omnipotent char", !1}
!1 = !{!"Simple C/C++ TBAA"}
!2 = !{!"int", !0}
!3 = !{!"short", !0}
declare { i64, i8* } @llvm.hexagon.L2.loadrd.pbr(i8*, i32) #1
declare { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8*, i32) #1
declare { i32, i8* } @llvm.hexagon.L2.loadrh.pbr(i8*, i32) #1
declare { i32, i8* } @llvm.hexagon.L2.loadruh.pbr(i8*, i32) #1
declare { i32, i8* } @llvm.hexagon.L2.loadrb.pbr(i8*, i32) #1
declare { i32, i8* } @llvm.hexagon.L2.loadrub.pbr(i8*, i32) #1
attributes #0 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readonly }

@@ -27,11 +27,11 @@ entry:
%sub = sub i32 13, %shr2
%shl = shl i32 1, %sub
; CHECK: memd(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.std(i8* %0, i64 undef, i32 %shl)
%1 = tail call i8* @llvm.hexagon.S2.storerd.pbr(i8* %0, i64 undef, i32 %shl)
ret i64 0
}
declare i8* @llvm.hexagon.brev.std(i8*, i64, i32) nounwind
declare i8* @llvm.hexagon.S2.storerd.pbr(i8*, i64, i32) nounwind
define i32 @foo1(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
entry:
@@ -43,11 +43,11 @@ entry:
%sub = sub i32 14, %shr1
%shl = shl i32 1, %sub
; CHECK: memw(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.stw(i8* %0, i32 undef, i32 %shl)
%1 = tail call i8* @llvm.hexagon.S2.storeri.pbr(i8* %0, i32 undef, i32 %shl)
ret i32 0
}
declare i8* @llvm.hexagon.brev.stw(i8*, i32, i32) nounwind
declare i8* @llvm.hexagon.S2.storeri.pbr(i8*, i32, i32) nounwind
define signext i16 @foo2(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
entry:
@@ -59,11 +59,11 @@ entry:
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev)
%1 = tail call i8* @llvm.hexagon.brev.sth(i8* %0, i32 0, i32 %shl)
%1 = tail call i8* @llvm.hexagon.S2.storerh.pbr(i8* %0, i32 0, i32 %shl)
ret i16 0
}
declare i8* @llvm.hexagon.brev.sth(i8*, i32, i32) nounwind
declare i8* @llvm.hexagon.S2.storerh.pbr(i8*, i32, i32) nounwind
define signext i16 @foo3(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
entry:
@@ -75,11 +75,11 @@ entry:
%sub = sub i32 15, %shr2
%shl = shl i32 1, %sub
; CHECK: memh(r{{[0-9]*}}++m{{[0-1]}}:brev) = r{{[0-9]*}}.h
%1 = tail call i8* @llvm.hexagon.brev.sthhi(i8* %0, i32 0, i32 %shl)
%1 = tail call i8* @llvm.hexagon.S2.storerf.pbr(i8* %0, i32 0, i32 %shl)
ret i16 0
}
declare i8* @llvm.hexagon.brev.sthhi(i8*, i32, i32) nounwind
declare i8* @llvm.hexagon.S2.storerf.pbr(i8*, i32, i32) nounwind
define zeroext i8 @foo5(i16 zeroext %filtMemLen, i16* %filtMemLR, i16 signext %filtMemIndex) nounwind {
entry:
@@ -91,11 +91,11 @@ entry:
%sub = sub nsw i32 16, %shr2
; CHECK: memb(r{{[0-9]*}}++m{{[0-1]}}:brev)
%shl = shl i32 1, %sub
%1 = tail call i8* @llvm.hexagon.brev.stb(i8* %0, i32 0, i32 %shl)
%1 = tail call i8* @llvm.hexagon.S2.storerb.pbr(i8* %0, i32 0, i32 %shl)
ret i8 0
}
declare i8* @llvm.hexagon.brev.stb(i8*, i32, i32) nounwind
declare i8* @llvm.hexagon.S2.storerb.pbr(i8*, i32, i32) nounwind
!0 = !{!"omnipotent char", !1}
!1 = !{!"Simple C/C++ TBAA"}