
[IR] Extend cmpxchg to allow pointer type operands

Today, we do not allow cmpxchg operations with pointer arguments. We require the frontend to insert ptrtoint casts and do the cmpxchg in integers. While correct, this is problematic from a couple of perspectives:
1) It makes the IR harder to analyse (for instance, it makes capture tracking overly conservative)
2) It pushes work onto frontend authors for no real gain

This patch implements the simplest form of IR support. As we did with floating point loads and stores, we teach AtomicExpand to convert back to the old representation. This avoids having to change every backend in a single lockstep change. Over time, we can migrate each backend to natively select the pointer type. In the meantime, we get the advantages of a cleaner IR representation without waiting for the backend changes.
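To make the difference concrete, here is a minimal sketch (hypothetical values, not taken from this patch's tests) of what a frontend had to emit before, next to what it can emit now:

    ; old idiom: frontend-inserted casts, cmpxchg performed on i64
    %addr.i = bitcast i8** %addr to i64*
    %cmp.i  = ptrtoint i8* %cmp to i64
    %new.i  = ptrtoint i8* %new to i64
    %pair.i = cmpxchg i64* %addr.i, i64 %cmp.i, i64 %new.i seq_cst seq_cst

    ; new representation: cmpxchg directly on the pointer type
    %pair = cmpxchg i8** %addr, i8* %cmp, i8* %new seq_cst seq_cst

In the old idiom, the ptrtoint casts make %cmp and %new look captured, which is one reason capture tracking had to be conservative.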

Differential Revision: http://reviews.llvm.org/D17413

llvm-svn: 261281
Philip Reames 2016-02-19 00:06:41 +00:00
parent 3941775761
commit 2d78d97ccc
5 changed files with 161 additions and 20 deletions


@@ -7082,13 +7082,13 @@ Arguments:
 There are three arguments to the '``cmpxchg``' instruction: an address
 to operate on, a value to compare to the value currently at that
 address, and a new value to place at that address if the compared values
-are equal. The type of '<cmp>' must be an integer type whose bit width
-is a power of two greater than or equal to eight and less than or equal
-to a target-specific size limit. '<cmp>' and '<new>' must have the same
-type, and the type of '<pointer>' must be a pointer to that type. If the
-``cmpxchg`` is marked as ``volatile``, then the optimizer is not allowed
-to modify the number or order of execution of this ``cmpxchg`` with
-other :ref:`volatile operations <volatile>`.
+are equal. The type of '<cmp>' must be an integer or pointer type whose
+bit width is a power of two greater than or equal to eight and less
+than or equal to a target-specific size limit. '<cmp>' and '<new>' must
+have the same type, and the type of '<pointer>' must be a pointer to
+that type. If the ``cmpxchg`` is marked as ``volatile``, then the
+optimizer is not allowed to modify the number or order of execution of
+this ``cmpxchg`` with other :ref:`volatile operations <volatile>`.
 
 The success and failure :ref:`ordering <ordering>` arguments specify how this
 ``cmpxchg`` synchronizes with other atomic operations. Both ordering parameters
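With the relaxed wording, a pointer-typed compare-and-exchange is valid IR. An illustrative example in the style of the LangRef examples (the value names are made up):

    %val_success = cmpxchg i8** %ptr, i8* %cmp, i8* %new acq_rel monotonic ; yields { i8*, i1 }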


@@ -27,6 +27,7 @@
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/Dwarf.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/SaveAndRestore.h"
@@ -5903,13 +5904,8 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
     return Error(CmpLoc, "compare value and pointer type do not match");
   if (cast<PointerType>(Ptr->getType())->getElementType() != New->getType())
     return Error(NewLoc, "new value and pointer type do not match");
-  if (!New->getType()->isIntegerTy())
-    return Error(NewLoc, "cmpxchg operand must be an integer");
-  unsigned Size = New->getType()->getPrimitiveSizeInBits();
-  if (Size < 8 || (Size & (Size - 1)))
-    return Error(NewLoc, "cmpxchg operand must be power-of-two byte-sized"
-                 " integer");
+  if (!New->getType()->isFirstClassType())
+    return Error(NewLoc, "cmpxchg operand must be a first class value");
   AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
       Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
   CXI->setVolatile(isVolatile);


@@ -60,6 +60,7 @@ namespace {
     bool expandAtomicOpToLLSC(
         Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
        std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
+    AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
     bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
     bool isIdempotentRMW(AtomicRMWInst *AI);
     bool simplifyIdempotentRMW(AtomicRMWInst *AI);
@@ -168,8 +169,22 @@ bool AtomicExpand::runOnFunction(Function &F) {
       } else {
         MadeChange |= tryExpandAtomicRMW(RMWI);
       }
-    } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
-      MadeChange |= expandAtomicCmpXchg(CASI);
+    } else if (CASI) {
+      // TODO: when we're ready to make the change at the IR level, we can
+      // extend convertCmpXchgToInteger for floating point too.
+      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
+             "unimplemented - floating point not legal at IR level");
+      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
+        // TODO: add a TLI hook to control this so that each target can
+        // convert to lowering the original type one at a time.
+        CASI = convertCmpXchgToIntegerType(CASI);
+        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
+               "invariant broken");
+        MadeChange = true;
+      }
+      if (TLI->shouldExpandAtomicCmpXchgInIR(CASI))
+        MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
   return MadeChange;
@@ -206,7 +221,7 @@ IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
 }
 
 /// Convert an atomic load of a non-integral type to an integer load of the
-/// equivelent bitwidth. See the function comment on
+/// equivalent bitwidth. See the function comment on
 /// convertAtomicStoreToIntegerType for background.
 LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
   auto *M = LI->getModule();
@@ -283,7 +298,7 @@ bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
 }
 
 /// Convert an atomic store of a non-integral type to an integer store of the
-/// equivelent bitwidth. We used to not support floating point or vector
+/// equivalent bitwidth. We used to not support floating point or vector
 /// atomics in the IR at all. The backends learned to deal with the bitcast
 /// idiom because that was the only way of expressing the notion of an atomic
 /// float or vector store. The long term plan is to teach each backend to
@@ -448,6 +463,50 @@ bool AtomicExpand::expandAtomicOpToLLSC(
   return true;
 }
 
+/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
+/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
+/// IR. As a migration step, we convert back to what used to be the standard
+/// way to represent a pointer cmpxchg so that we can update backends one by
+/// one.
+AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
+  auto *M = CI->getModule();
+  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
+                                            M->getDataLayout());
+
+  IRBuilder<> Builder(CI);
+  Value *Addr = CI->getPointerOperand();
+  Type *PT = PointerType::get(NewTy,
+                              Addr->getType()->getPointerAddressSpace());
+  Value *NewAddr = Builder.CreateBitCast(Addr, PT);
+
+  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
+  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
+
+  auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
+                                            CI->getSuccessOrdering(),
+                                            CI->getFailureOrdering(),
+                                            CI->getSynchScope());
+  NewCI->setVolatile(CI->isVolatile());
+  NewCI->setWeak(CI->isWeak());
+  DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
+
+  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
+  Value *Succ = Builder.CreateExtractValue(NewCI, 1);
+
+  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());
+
+  Value *Res = UndefValue::get(CI->getType());
+  Res = Builder.CreateInsertValue(Res, OldVal, 0);
+  Res = Builder.CreateInsertValue(Res, Succ, 1);
+
+  CI->replaceAllUsesWith(Res);
+  CI->eraseFromParent();
+  return NewCI;
+}
+
 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
   AtomicOrdering FailureOrder = CI->getFailureOrdering();


@@ -2952,8 +2952,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
   PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
   Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
   Type *ElTy = PTy->getElementType();
-  Assert(ElTy->isIntegerTy(), "cmpxchg operand must have integer type!", &CXI,
-         ElTy);
+  Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(),
+         "cmpxchg operand must have integer or pointer type",
+         ElTy, &CXI);
   checkAtomicMemAccessSize(M, ElTy, &CXI);
   Assert(ElTy == CXI.getOperand(1)->getType(),
          "Expected value type does not match pointer operand type!", &CXI,
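Note the division of labor after this change: the parser merely requires a first class value, while the verifier enforces the integer-or-pointer rule. A small illustrative module (not part of the patch's tests):

    ; accepted: pointer element type is now legal
    define i8* @cas_ptr(i8** %p, i8* %cmp, i8* %new) {
      %pair = cmpxchg i8** %p, i8* %cmp, i8* %new seq_cst seq_cst
      %old = extractvalue { i8*, i1 } %pair, 0
      ret i8* %old
    }

    ; still rejected by the verifier with
    ; "cmpxchg operand must have integer or pointer type":
    ;   cmpxchg float* %p, float %a, float %b seq_cst seq_cst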


@@ -80,3 +80,88 @@ define void @float_store_expand_addr1(float addrspace(1)* %ptr, float %v) {
   ret void
 }
+
+define void @pointer_cmpxchg_expand(i8** %ptr, i8* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand
+; CHECK: %1 = bitcast i8** %ptr to i64*
+; CHECK: %2 = ptrtoint i8* %v to i64
+; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst monotonic
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8*
+; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
+; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
+  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst monotonic
+  ret void
+}
+
+define void @pointer_cmpxchg_expand2(i8** %ptr, i8* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand2
+; CHECK: %1 = bitcast i8** %ptr to i64*
+; CHECK: %2 = ptrtoint i8* %v to i64
+; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 release monotonic
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8*
+; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
+; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
+  cmpxchg i8** %ptr, i8* null, i8* %v release monotonic
+  ret void
+}
+
+define void @pointer_cmpxchg_expand3(i8** %ptr, i8* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand3
+; CHECK: %1 = bitcast i8** %ptr to i64*
+; CHECK: %2 = ptrtoint i8* %v to i64
+; CHECK: %3 = cmpxchg i64* %1, i64 0, i64 %2 seq_cst seq_cst
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8*
+; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
+; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
+  cmpxchg i8** %ptr, i8* null, i8* %v seq_cst seq_cst
+  ret void
+}
+
+define void @pointer_cmpxchg_expand4(i8** %ptr, i8* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand4
+; CHECK: %1 = bitcast i8** %ptr to i64*
+; CHECK: %2 = ptrtoint i8* %v to i64
+; CHECK: %3 = cmpxchg weak i64* %1, i64 0, i64 %2 seq_cst seq_cst
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8*
+; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
+; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
+  cmpxchg weak i8** %ptr, i8* null, i8* %v seq_cst seq_cst
+  ret void
+}
+
+define void @pointer_cmpxchg_expand5(i8** %ptr, i8* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand5
+; CHECK: %1 = bitcast i8** %ptr to i64*
+; CHECK: %2 = ptrtoint i8* %v to i64
+; CHECK: %3 = cmpxchg volatile i64* %1, i64 0, i64 %2 seq_cst seq_cst
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8*
+; CHECK: %7 = insertvalue { i8*, i1 } undef, i8* %6, 0
+; CHECK: %8 = insertvalue { i8*, i1 } %7, i1 %5, 1
+  cmpxchg volatile i8** %ptr, i8* null, i8* %v seq_cst seq_cst
+  ret void
+}
+
+define void @pointer_cmpxchg_expand6(i8 addrspace(2)* addrspace(1)* %ptr,
+                                     i8 addrspace(2)* %v) {
+; CHECK-LABEL: @pointer_cmpxchg_expand6
+; CHECK: %1 = bitcast i8 addrspace(2)* addrspace(1)* %ptr to i64 addrspace(1)*
+; CHECK: %2 = ptrtoint i8 addrspace(2)* %v to i64
+; CHECK: %3 = cmpxchg i64 addrspace(1)* %1, i64 0, i64 %2 seq_cst seq_cst
+; CHECK: %4 = extractvalue { i64, i1 } %3, 0
+; CHECK: %5 = extractvalue { i64, i1 } %3, 1
+; CHECK: %6 = inttoptr i64 %4 to i8 addrspace(2)*
+; CHECK: %7 = insertvalue { i8 addrspace(2)*, i1 } undef, i8 addrspace(2)* %6, 0
+; CHECK: %8 = insertvalue { i8 addrspace(2)*, i1 } %7, i1 %5, 1
+  cmpxchg i8 addrspace(2)* addrspace(1)* %ptr, i8 addrspace(2)* null, i8 addrspace(2)* %v seq_cst seq_cst
+  ret void
+}