AMDGPU: Fix constantexpr addrspacecasts

If we had a constant group address space cast, the queue pointer wasn't
enabled for the function, resulting in a crash on noreg later.

llvm-svn: 271935
parent ba23c5fa4d
commit 2c826f3e0a
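For context, the failing pattern is an addrspacecast out of the group
(local) address space that exists only as a constant expression: no
AddrSpaceCastInst ever appears in the function body, so the old
instruction-only scan in hasAddrSpaceCast never saw it. A minimal
reproducer sketch (the global and function names here are illustrative;
the shape matches the new test added below):

  @lds = addrspace(3) global i32 undef, align 4

  define void @constexpr_group_to_flat() {
    ; The group-to-flat cast is folded into the store's pointer operand
    ; as a ConstantExpr, not emitted as an instruction.
    store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds to i32 addrspace(4)*)
    ret void
  }

Without the "amdgpu-queue-ptr" attribute, getSegmentAperture later asks
for a queue-pointer user SGPR that was never reserved and falls over on
AMDGPU::NoRegister.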
lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp

@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Module.h"
 
@@ -42,6 +43,11 @@ public:
     AU.setPreservesAll();
     ModulePass::getAnalysisUsage(AU);
   }
+
+  static bool visitConstantExpr(const ConstantExpr *CE);
+  static bool visitConstantExprsRecursively(
+    const Constant *EntryC,
+    SmallPtrSet<const Constant *, 8> &ConstantExprVisited);
 };
 
 }
@@ -53,21 +59,79 @@ char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;
 INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                 "Add AMDGPU function attributes", false, false)
 
-static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
-  unsigned SrcAS = ASC->getSrcAddressSpace();
-
-  // The queue ptr is only needed when casting to flat, not from it.
+// The queue ptr is only needed when casting to flat, not from it.
+static bool castRequiresQueuePtr(unsigned SrcAS) {
   return SrcAS == AMDGPUAS::LOCAL_ADDRESS || SrcAS == AMDGPUAS::PRIVATE_ADDRESS;
 }
 
+static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
+  return castRequiresQueuePtr(ASC->getSrcAddressSpace());
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE) {
+  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
+    unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
+    return castRequiresQueuePtr(SrcAS);
+  }
+
+  return false;
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
+  const Constant *EntryC,
+  SmallPtrSet<const Constant *, 8> &ConstantExprVisited) {
+
+  if (!ConstantExprVisited.insert(EntryC).second)
+    return false;
+
+  SmallVector<const Constant *, 16> Stack;
+  Stack.push_back(EntryC);
+
+  while (!Stack.empty()) {
+    const Constant *C = Stack.pop_back_val();
+
+    // Check this constant expression.
+    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
+      if (visitConstantExpr(CE))
+        return true;
+    }
+
+    // Visit all sub-expressions.
+    for (const Use &U : C->operands()) {
+      const auto *OpC = dyn_cast<Constant>(U);
+      if (!OpC)
+        continue;
+
+      if (!ConstantExprVisited.insert(OpC).second)
+        continue;
+
+      Stack.push_back(OpC);
+    }
+  }
+
+  return false;
+}
+
 // Return true if an addrspacecast is used that requires the queue ptr.
 bool AMDGPUAnnotateKernelFeatures::hasAddrSpaceCast(const Function &F) {
+  SmallPtrSet<const Constant *, 8> ConstantExprVisited;
+
   for (const BasicBlock &BB : F) {
     for (const Instruction &I : BB) {
       if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
         if (castRequiresQueuePtr(ASC))
           return true;
       }
+
+      for (const Use &U : I.operands()) {
+        const auto *OpC = dyn_cast<Constant>(U);
+        if (!OpC)
+          continue;
+
+        if (visitConstantExprsRecursively(OpC, ConstantExprVisited))
+          return true;
+      }
     }
   }
 
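The recursive walk is the heart of the fix: the offending cast can be
buried arbitrarily deep inside other constant expressions, where neither
the operand's pointer value nor its type gives it away. One case from the
new test below makes the point:

  store i64 ptrtoint (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i64), i64 addrspace(1)* %out

The stored value is a plain i64, yet peeling back the ptrtoint and
getelementptr layers uncovers a group-to-flat addrspacecast, so the
function still needs the queue pointer. The visited set is shared across
the whole function so that a constant reachable from many instructions is
only expanded once, and the explicit worklist keeps deeply nested
expressions from recursing too far.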
lib/Target/AMDGPU/SIISelLowering.cpp

@@ -1334,8 +1334,11 @@ SDValue SITargetLowering::getSegmentAperture(unsigned AS,
   SDLoc SL;
   MachineFunction &MF = DAG.getMachineFunction();
   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
+  assert(UserSGPR != AMDGPU::NoRegister);
+
   SDValue QueuePtr = CreateLiveInRegister(
-    DAG, &AMDGPU::SReg_64RegClass, Info->getQueuePtrUserSGPR(), MVT::i64);
+    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
 
   // Offset into amd_queue_t for group_segment_aperture_base_hi /
   // private_segment_aperture_base_hi.
test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll (new file, 106 lines)

@@ -0,0 +1,106 @@
+; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefix=HSA %s
+
+declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture, i32, i32, i1) #0
+
+@lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4
+@lds.arr = unnamed_addr addrspace(3) global [256 x i32] undef, align 4
+
+@global.i32 = unnamed_addr addrspace(1) global i32 undef, align 4
+@global.arr = unnamed_addr addrspace(1) global [256 x i32] undef, align 4
+
+; HSA: @store_cast_0_flat_to_group_addrspacecast() #1
+define void @store_cast_0_flat_to_group_addrspacecast() #1 {
+  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+  ret void
+}
+
+; HSA: @store_cast_0_group_to_flat_addrspacecast() #2
+define void @store_cast_0_group_to_flat_addrspacecast() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: define void @store_constant_cast_group_gv_to_flat() #2
+define void @store_constant_cast_group_gv_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: @store_constant_cast_group_gv_gep_to_flat() #2
+define void @store_constant_cast_group_gv_gep_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  ret void
+}
+
+; HSA: @store_constant_cast_global_gv_to_flat() #1
+define void @store_constant_cast_global_gv_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global.i32 to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: @store_constant_cast_global_gv_gep_to_flat() #1
+define void @store_constant_cast_global_gv_gep_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(1)* @global.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  ret void
+}
+
+; HSA: @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = load i32, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  store i32 %val, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = atomicrmw add i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 1 seq_cst
+  store i32 %val, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = cmpxchg i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 0, i32 1 seq_cst seq_cst
+  %val0 = extractvalue { i32, i1 } %val, 0
+  store i32 %val0, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* %out, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i32 4, i1 false)
+  ret void
+}
+
+; Can't just search the pointer value
+; HSA: @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #2
+define void @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #1 {
+  store i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 addrspace(4)* addrspace(1)* %out
+  ret void
+}
+
+; Can't just search pointer types
+; HSA: @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #2
+define void @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #1 {
+  store i64 ptrtoint (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i64), i64 addrspace(1)* %out
+  ret void
+}
+
+; Cast group to flat, do GEP, cast back to group
+; HSA: @store_constant_cast_group_gv_gep_to_flat_to_group() #2
+define void @store_constant_cast_group_gv_gep_to_flat_to_group() #1 {
+  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
+  ret void
+}
+
+; HSA: @ret_constant_cast_group_gv_gep_to_flat_to_group() #2
+define i32 addrspace(3)* @ret_constant_cast_group_gv_gep_to_flat_to_group() #1 {
+  ret i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
+}
+
+; HSA: attributes #0 = { argmemonly nounwind }
+; HSA: attributes #1 = { nounwind }
+; HSA: attributes #2 = { nounwind "amdgpu-queue-ptr" }
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { nounwind }
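The attribute sets tell the story: as the CHECK lines above assert,
functions whose casts originate in the group (local) or private address
space are annotated with "amdgpu-queue-ptr" (#2), while casts from the
global address space get by without it (#1). The annotation can be
reproduced standalone by feeding any of these functions through opt,
exactly as the test's RUN line does:

  opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll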