1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 03:02:36 +01:00

GlobalISel: translate GEP instructions.

Unlike SDag, we use a separate G_GEP instruction (much simplified, only taking
a single byte offset) to preserve the pointer type information through
selection.

llvm-svn: 281205
This commit is contained in:
Tim Northover 2016-09-12 11:20:22 +00:00
parent 2d8bf8b427
commit fda3bc797a
7 changed files with 216 additions and 1 deletions

View File

@ -167,6 +167,8 @@ private:
bool translateSelect(const User &U);
bool translateGetElementPtr(const User &U);
/// Translate return (ret) instruction.
/// The target needs to implement CallLowering::lowerReturn for
/// this to succeed.
@ -282,7 +284,6 @@ private:
bool translateCleanupRet(const User &U) { return false; }
bool translateCatchRet(const User &U) { return false; }
bool translateCatchSwitch(const User &U) { return false; }
bool translateGetElementPtr(const User &U) { return false; }
bool translateFence(const User &U) { return false; }
bool translateAtomicCmpXchg(const User &U) { return false; }
bool translateAtomicRMW(const User &U) { return false; }

View File

@ -161,6 +161,20 @@ public:
MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
unsigned Op1);
/// Build and insert \p Res<def> = G_GEP \p Op0, \p Op1
///
/// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res and \p Op0 must be generic virtual registers with pointer
/// type.
/// \pre \p Op1 must be a generic virtual register with scalar type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
unsigned Op1);
/// Build and insert \p Res<def>, \p CarryOut<def> = G_UADDE \p Op0,
/// \p Op1, \p CarryIn
///
@ -221,6 +235,16 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
/// Build and insert \p Res<def> = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res must be a generic virtual register with scalar or vector type.
/// \pre \p Op must be a generic virtual register with scalar or vector type.
///
/// \return The newly created instruction.
MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
/// Build and insert G_BR \p Dest
///
/// G_BR is an unconditional branch to \p Dest.

View File

@ -97,6 +97,13 @@ def G_ADD : Instruction {
let isCommutable = 1;
}
// Generic pointer offset.
// Adds a scalar byte offset (type1:$src2) to a pointer (type0:$src1),
// producing a pointer of the same type in $dst. This is the GlobalISel
// counterpart of address arithmetic: unlike IR getelementptr it carries
// no type structure, only a raw byte displacement.
def G_GEP : Instruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src1, type1:$src2);
// Pure address arithmetic: no memory access or other side effects.
let hasSideEffects = 0;
}
// Generic subtraction.
def G_SUB : Instruction {
let OutOperandList = (outs type0:$dst);

View File

@ -333,6 +333,9 @@ HANDLE_TARGET_OPCODE(G_SITOFP)
/// Generic unsigned-int to float conversion
HANDLE_TARGET_OPCODE(G_UITOFP)
/// Generic pointer offset.
HANDLE_TARGET_OPCODE(G_GEP)
/// Generic BRANCH instruction. This is an unconditional branch.
HANDLE_TARGET_OPCODE(G_BR)

View File

@ -20,6 +20,7 @@
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@ -287,6 +288,77 @@ bool IRTranslator::translateCast(unsigned Opcode, const User &U) {
return true;
}
/// Translate an IR getelementptr into generic MIR.
///
/// Constant indices (struct fields and constant array indices) are folded
/// into a single running byte offset so we emit the minimum number of
/// G_GEP instructions; each variable index produces a G_MUL by the element
/// size followed by a G_GEP. Returns false to fall back on unsupported
/// forms (currently vector GEPs).
bool IRTranslator::translateGetElementPtr(const User &U) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy(*Op0.getType());
  // The offset operand of G_GEP is an integer as wide as a pointer in this
  // address space.
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  // Running byte offset accumulated from constant indices; materialized
  // lazily, just before a variable index or at the end.
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
      // Struct indices are always constants; fold the field offset in.
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    }

    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

    // If this is a scalar constant or a splat vector of constants,
    // handle it quickly.
    if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
      Offset += ElementSize * CI->getSExtValue();
      continue;
    }

    // Variable index: first flush any pending constant offset into a G_GEP
    // so the multiply below applies to the correct base.
    if (Offset != 0) {
      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(OffsetReg, Offset);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
      Offset = 0;
    }

    // N = N + Idx * ElementSize;
    unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

    unsigned IdxReg = getOrCreateVReg(*Idx);
    // Indices narrower/wider than the pointer are sign-extended/truncated
    // to the offset type before the multiply.
    if (MRI->getType(IdxReg) != OffsetTy) {
      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
      IdxReg = NewIdxReg;
    }

    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
    BaseReg = NewBaseReg;
  }

  // Apply any remaining constant offset directly into the result register,
  // or just copy the base through when the GEP was a no-op.
  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI,
Intrinsic::ID ID) {
unsigned Op = 0;

View File

@ -97,6 +97,18 @@ MachineInstrBuilder MachineIRBuilder::buildAdd(unsigned Res, unsigned Op0,
.addUse(Op1);
}
/// Build and insert \p Res<def> = G_GEP \p Op0, \p Op1, i.e. a pointer
/// \p Op0 displaced by \p Op1 bytes. \p Res and \p Op0 must share the same
/// pointer type and \p Op1 must be a scalar offset.
MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
                                               unsigned Op1) {
  LLT ResTy = MRI->getType(Res);
  assert(ResTy.isPointer() && ResTy == MRI->getType(Op0) && "type mismatch");
  assert(MRI->getType(Op1).isScalar() && "invalid offset type");

  MachineInstrBuilder MIB = buildInstr(TargetOpcode::G_GEP);
  MIB.addDef(Res);
  MIB.addUse(Op0);
  MIB.addUse(Op1);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildSub(unsigned Res, unsigned Op0,
unsigned Op1) {
assert((MRI->getType(Res).isScalar() || MRI->getType(Res).isVector()) &&
@ -206,6 +218,17 @@ MachineInstrBuilder MachineIRBuilder::buildZExt(unsigned Res, unsigned Op) {
return buildInstr(TargetOpcode::G_ZEXT).addDef(Res).addUse(Op);
}
/// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or a
/// plain COPY, chosen by comparing the bit widths of \p Res and \p Op.
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(unsigned Res,
                                                       unsigned Op) {
  const unsigned DstSize = MRI->getType(Res).getSizeInBits();
  const unsigned SrcSize = MRI->getType(Op).getSizeInBits();

  // Widen with sign-extension, narrow with truncation, otherwise copy.
  unsigned Opcode;
  if (DstSize > SrcSize)
    Opcode = TargetOpcode::G_SEXT;
  else if (DstSize < SrcSize)
    Opcode = TargetOpcode::G_TRUNC;
  else
    Opcode = TargetOpcode::COPY;

  return buildInstr(Opcode).addDef(Res).addUse(Op);
}
MachineInstrBuilder MachineIRBuilder::buildExtract(ArrayRef<unsigned> Results,
ArrayRef<uint64_t> Indices,
unsigned Src) {

View File

@ -0,0 +1,85 @@
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -global-isel -stop-after=irtranslator -o - %s | FileCheck %s
%type = type [4 x {i8, i32}]
; A constant index of 1 over %type folds to a single 32-byte offset
; (see the G_CONSTANT 32 below), emitted as one G_CONSTANT + G_GEP pair.
define %type* @first_offset_const(%type* %addr) {
; CHECK-LABEL: name: first_offset_const
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[OFFSET:%[0-9]+]](s64) = G_CONSTANT 32
; CHECK: [[RES:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET]](s64)
; CHECK: %x0 = COPY [[RES]](p0)
%res = getelementptr %type, %type* %addr, i32 1
ret %type* %res
}
; Index 0 contributes no offset: no G_GEP is emitted at all, only a COPY
; of the base pointer.
define %type* @first_offset_trivial(%type* %addr) {
; CHECK-LABEL: name: first_offset_trivial
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[TRIVIAL:%[0-9]+]](p0) = COPY [[BASE]](p0)
; CHECK: %x0 = COPY [[TRIVIAL]](p0)
%res = getelementptr %type, %type* %addr, i32 0
ret %type* %res
}
; A variable i64 index becomes G_MUL of the index by the element size
; (G_CONSTANT 32) followed by a G_GEP; no extension is needed since the
; index is already pointer-width.
define %type* @first_offset_variable(%type* %addr, i64 %idx) {
; CHECK-LABEL: name: first_offset_variable
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT 32
; CHECK: [[OFFSET:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[STEP0:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[STEP0]](p0)
; CHECK: %x0 = COPY [[RES]](p0)
%res = getelementptr %type, %type* %addr, i64 %idx
ret %type* %res
}
; A 32-bit variable index must be sign-extended (G_SEXT) to the 64-bit
; offset type before the multiply and G_GEP.
define %type* @first_offset_ext(%type* %addr, i32 %idx) {
; CHECK-LABEL: name: first_offset_ext
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX32:%[0-9]+]](s32) = COPY %w1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT 32
; CHECK: [[IDX64:%[0-9]+]](s64) = G_SEXT [[IDX32]](s32)
; CHECK: [[OFFSET:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX64]]
; CHECK: [[STEP0:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[STEP0]](p0)
; CHECK: %x0 = COPY [[RES]](p0)
%res = getelementptr %type, %type* %addr, i32 %idx
ret %type* %res
}
; %type1: 4x4 array of i32 — 64 bytes total, 16 bytes per inner row.
%type1 = type [4 x [4 x i32]]
; The two leading constant indices fold into one offset (G_CONSTANT 272)
; and a single G_GEP, before the variable index's G_MUL/G_GEP pair.
define i32* @const_then_var(%type1* %addr, i64 %idx) {
; CHECK-LABEL: name: const_then_var
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT 272
; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT 4
; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: [[RES:%[0-9]+]](p0) = COPY [[BASE2]](p0)
; CHECK: %x0 = COPY [[RES]](p0)
%res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx
ret i32* %res
}
; Variable index first (G_MUL by 64 + G_GEP), then the two trailing
; constant indices fold into one trailing offset (G_CONSTANT 40) applied
; by a second G_GEP directly into the result — no final COPY.
define i32* @var_then_const(%type1* %addr, i64 %idx) {
; CHECK-LABEL: name: var_then_const
; CHECK: [[BASE:%[0-9]+]](p0) = COPY %x0
; CHECK: [[IDX:%[0-9]+]](s64) = COPY %x1
; CHECK: [[SIZE:%[0-9]+]](s64) = G_CONSTANT 64
; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_MUL [[SIZE]], [[IDX]]
; CHECK: [[BASE1:%[0-9]+]](p0) = G_GEP [[BASE]], [[OFFSET1]](s64)
; CHECK: [[OFFSET2:%[0-9]+]](s64) = G_CONSTANT 40
; CHECK: [[BASE2:%[0-9]+]](p0) = G_GEP [[BASE1]], [[OFFSET2]](s64)
; CHECK: %x0 = COPY [[BASE2]](p0)
%res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2
ret i32* %res
}