
Reapply "IR: Add fp operations to atomicrmw"

This reapplies commits r351778 and r351782 with
RISCV test fixes.

llvm-svn: 351850
Matt Arsenault 2019-01-22 18:18:02 +00:00
parent 5462c341de
commit fd94cae16d
26 changed files with 407 additions and 22 deletions


@@ -8690,15 +8690,18 @@ operation. The operation must be one of the following keywords:
- min
- umax
- umin
- fadd
- fsub
For most of these operations, the type of '``<value>``' must be an integer
type whose bit width is a power of two greater than or equal to eight
and less than or equal to a target-specific size limit. For xchg, this
may also be a floating point type with the same size constraints as
integers. The type of the '``<pointer>``' operand must be a pointer to
that type. If the ``atomicrmw`` is marked as ``volatile``, then the
optimizer is not allowed to modify the number or order of execution of
this ``atomicrmw`` with other :ref:`volatile operations <volatile>`.
integers. For fadd/fsub, this must be a floating point type. The
type of the '``<pointer>``' operand must be a pointer to that type. If
the ``atomicrmw`` is marked as ``volatile``, then the optimizer is not
allowed to modify the number or order of execution of this
``atomicrmw`` with other :ref:`volatile operations <volatile>`.
An ``atomicrmw`` instruction can also take an optional
":ref:`syncscope <syncscope>`" argument.
@@ -8724,6 +8727,8 @@ operation argument:
comparison)
- umin: ``*ptr = *ptr < val ? *ptr : val`` (using an unsigned
comparison)
- fadd: ``*ptr = *ptr + val`` (using floating point arithmetic)
- fsub: ``*ptr = *ptr - val`` (using floating point arithmetic)
Example:
""""""""


@@ -406,7 +406,9 @@ enum RMWOperations {
RMW_MAX = 7,
RMW_MIN = 8,
RMW_UMAX = 9,
RMW_UMIN = 10
RMW_UMIN = 10,
RMW_FADD = 11,
RMW_FSUB = 12
};
/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing


@@ -1715,8 +1715,9 @@ public:
/// Returns how the IR-level AtomicExpand pass should expand the given
/// AtomicRMW, if at all. Default is to never expand.
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
return AtomicExpansionKind::None;
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
return RMW->isFloatingPointOperation() ?
AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
}
/// On some platforms, an AtomicRMW that never actually modifies the value
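
With this default, every floating-point atomicrmw is legalized through a compare-exchange loop unless a backend opts out. A hypothetical override for a target with a native 32-bit FP atomic add might look like the sketch below (the class name is illustrative only, not part of this patch):

TargetLowering::AtomicExpansionKind
MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // Keep f32 fadd unexpanded so instruction selection can use a native op.
  if (AI->getOperation() == AtomicRMWInst::FAdd && AI->getType()->isFloatTy())
    return AtomicExpansionKind::None;
  // Everything else, including fsub, falls back to the cmpxchg expansion.
  return TargetLowering::shouldExpandAtomicRMWInIR(AI);
}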


@@ -724,8 +724,14 @@ public:
/// *p = old <unsigned v ? old : v
UMin,
/// *p = old + v
FAdd,
/// *p = old - v
FSub,
FIRST_BINOP = Xchg,
LAST_BINOP = UMin,
LAST_BINOP = FSub,
BAD_BINOP
};
@@ -747,6 +753,16 @@ public:
static StringRef getOperationName(BinOp Op);
static bool isFPOperation(BinOp Op) {
switch (Op) {
case AtomicRMWInst::FAdd:
case AtomicRMWInst::FSub:
return true;
default:
return false;
}
}
void setOperation(BinOp Operation) {
unsigned short SubclassData = getSubclassDataFromInstruction();
setInstructionSubclassData((SubclassData & 31) |
@@ -804,6 +820,10 @@ public:
return getPointerOperand()->getType()->getPointerAddressSpace();
}
bool isFloatingPointOperation() const {
return isFPOperation(getOperation());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicRMW;
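
Every switch over AtomicRMWInst::BinOp elsewhere in the tree now has to handle the two new enumerators, which is what the bitcode reader/writer, getOperationName, AtomicExpand, and Verifier hunks below do. A small sketch of the pattern, using a hypothetical helper:

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: does this atomicrmw produce a floating-point value?
static bool resultIsFloatingPoint(const AtomicRMWInst &RMW) {
  switch (RMW.getOperation()) {
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    return true; // the new FP operations
  case AtomicRMWInst::Xchg:
    // xchg accepted floating point operands even before this patch.
    return RMW.getValOperand()->getType()->isFloatingPointTy();
  default:
    return false; // the integer operations
  }
}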


@@ -6815,6 +6815,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
bool IsFP = false;
AtomicRMWInst::BinOp Operation;
if (EatIfPresent(lltok::kw_volatile))
@@ -6833,6 +6834,14 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
case lltok::kw_min: Operation = AtomicRMWInst::Min; break;
case lltok::kw_umax: Operation = AtomicRMWInst::UMax; break;
case lltok::kw_umin: Operation = AtomicRMWInst::UMin; break;
case lltok::kw_fadd:
Operation = AtomicRMWInst::FAdd;
IsFP = true;
break;
case lltok::kw_fsub:
Operation = AtomicRMWInst::FSub;
IsFP = true;
break;
}
Lex.Lex(); // Eat the operation.
@@ -6849,18 +6858,25 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
return Error(ValLoc, "atomicrmw value and pointer type do not match");
if (Operation != AtomicRMWInst::Xchg && !Val->getType()->isIntegerTy()) {
return Error(ValLoc, "atomicrmw " +
AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer");
}
if (Operation == AtomicRMWInst::Xchg &&
!Val->getType()->isIntegerTy() &&
!Val->getType()->isFloatingPointTy()) {
return Error(ValLoc, "atomicrmw " +
AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer or floating point type");
if (Operation == AtomicRMWInst::Xchg) {
if (!Val->getType()->isIntegerTy() &&
!Val->getType()->isFloatingPointTy()) {
return Error(ValLoc, "atomicrmw " +
AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer or floating point type");
}
} else if (IsFP) {
if (!Val->getType()->isFloatingPointTy()) {
return Error(ValLoc, "atomicrmw " +
AtomicRMWInst::getOperationName(Operation) +
" operand must be a floating point type");
}
} else {
if (!Val->getType()->isIntegerTy()) {
return Error(ValLoc, "atomicrmw " +
AtomicRMWInst::getOperationName(Operation) +
" operand must be an integer");
}
}
unsigned Size = Val->getType()->getPrimitiveSizeInBits();


@@ -1034,6 +1034,8 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
case bitc::RMW_MIN: return AtomicRMWInst::Min;
case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
case bitc::RMW_FADD: return AtomicRMWInst::FAdd;
case bitc::RMW_FSUB: return AtomicRMWInst::FSub;
}
}


@@ -559,6 +559,8 @@ static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::Min: return bitc::RMW_MIN;
case AtomicRMWInst::UMax: return bitc::RMW_UMAX;
case AtomicRMWInst::UMin: return bitc::RMW_UMIN;
case AtomicRMWInst::FAdd: return bitc::RMW_FADD;
case AtomicRMWInst::FSub: return bitc::RMW_FSUB;
}
}


@@ -549,6 +549,10 @@ static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
case AtomicRMWInst::UMin:
NewVal = Builder.CreateICmpULE(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
case AtomicRMWInst::FAdd:
return Builder.CreateFAdd(Loaded, Inc, "new");
case AtomicRMWInst::FSub:
return Builder.CreateFSub(Loaded, Inc, "new");
default:
llvm_unreachable("Unknown atomic op");
}
@@ -1547,6 +1551,8 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::Min:
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
case AtomicRMWInst::FAdd:
case AtomicRMWInst::FSub:
// No atomic libcalls are available for max/min/umax/umin.
return {};
}


@@ -1407,6 +1407,10 @@ StringRef AtomicRMWInst::getOperationName(BinOp Op) {
return "umax";
case AtomicRMWInst::UMin:
return "umin";
case AtomicRMWInst::FAdd:
return "fadd";
case AtomicRMWInst::FSub:
return "fsub";
case AtomicRMWInst::BAD_BINOP:
return "<invalid operation>";
}


@@ -3435,6 +3435,11 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
AtomicRMWInst::getOperationName(Op) +
" operand must have integer or floating point type!",
&RMWI, ElTy);
} else if (AtomicRMWInst::isFPOperation(Op)) {
Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
AtomicRMWInst::getOperationName(Op) +
" operand must have floating point type!",
&RMWI, ElTy);
} else {
Assert(ElTy->isIntegerTy(), "atomicrmw " +
AtomicRMWInst::getOperationName(Op) +


@@ -11600,6 +11600,9 @@ AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
// For the real atomic operations, we have ldxr/stxr up to 128 bits,
TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation())
return AtomicExpansionKind::CmpXChg;
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if (Size > 128) return AtomicExpansionKind::None;
// Nand not supported in LSE.


@@ -14645,6 +14645,9 @@ ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (AI->isFloatingPointOperation())
return AtomicExpansionKind::CmpXChg;
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)


@@ -3110,13 +3110,21 @@ Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
BasicBlock *BB = Builder.GetInsertBlock();
Module *M = BB->getParent()->getParent();
Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
auto PT = cast<PointerType>(Addr->getType());
Type *Ty = PT->getElementType();
unsigned SZ = Ty->getPrimitiveSizeInBits();
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
: Intrinsic::hexagon_L4_loadd_locked;
PointerType *NewPtrTy
= Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
Addr = Builder.CreateBitCast(Addr, NewPtrTy);
Value *Fn = Intrinsic::getDeclaration(M, IntID);
return Builder.CreateCall(Fn, Addr, "larx");
Value *Call = Builder.CreateCall(Fn, Addr, "larx");
return Builder.CreateBitCast(Call, Ty);
}
/// Perform a store-conditional operation to Addr. Return the status of the
@@ -3127,10 +3135,17 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
Module *M = BB->getParent()->getParent();
Type *Ty = Val->getType();
unsigned SZ = Ty->getPrimitiveSizeInBits();
Type *CastTy = Builder.getIntNTy(SZ);
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
: Intrinsic::hexagon_S4_stored_locked;
Value *Fn = Intrinsic::getDeclaration(M, IntID);
unsigned AS = Addr->getType()->getPointerAddressSpace();
Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
Val = Builder.CreateBitCast(Val, CastTy);
Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));


@@ -1724,6 +1724,12 @@ Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
// atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
// point operations can't be used in an lr/sc sequence without breaking the
// forward-progress guarantee.
if (AI->isFloatingPointOperation())
return AtomicExpansionKind::CmpXChg;
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if (Size == 8 || Size == 16)
return AtomicExpansionKind::MaskedIntrinsic;


@@ -39,3 +39,13 @@ define void @f(i32* %x) {
fence syncscope("device") seq_cst
ret void
}
define void @fp_atomics(float* %x) {
; CHECK: atomicrmw fadd float* %x, float 1.000000e+00 seq_cst
atomicrmw fadd float* %x, float 1.0 seq_cst
; CHECK: atomicrmw volatile fadd float* %x, float 1.000000e+00 seq_cst
atomicrmw volatile fadd float* %x, float 1.0 seq_cst
ret void
}


@@ -0,0 +1,7 @@
; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
; CHECK: error: atomicrmw fadd operand must be a floating point type
define void @f(i32* %ptr) {
atomicrmw fadd i32* %ptr, i32 2 seq_cst
ret void
}


@@ -0,0 +1,7 @@
; RUN: not llvm-as -disable-output %s 2>&1 | FileCheck %s
; CHECK: error: atomicrmw fsub operand must be a floating point type
define void @f(i32* %ptr) {
atomicrmw fsub i32* %ptr, i32 2 seq_cst
ret void
}


@@ -764,6 +764,13 @@ define void @atomics(i32* %word) {
define void @fp_atomics(float* %word) {
; CHECK: %atomicrmw.xchg = atomicrmw xchg float* %word, float 1.000000e+00 monotonic
%atomicrmw.xchg = atomicrmw xchg float* %word, float 1.0 monotonic
; CHECK: %atomicrmw.fadd = atomicrmw fadd float* %word, float 1.000000e+00 monotonic
%atomicrmw.fadd = atomicrmw fadd float* %word, float 1.0 monotonic
; CHECK: %atomicrmw.fsub = atomicrmw fsub float* %word, float 1.000000e+00 monotonic
%atomicrmw.fsub = atomicrmw fsub float* %word, float 1.0 monotonic
ret void
}


@@ -0,0 +1,47 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=aarch64-linux-gnu -atomic-expand %s | FileCheck %s
define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fadd float* %ptr, float %value seq_cst
ret float %res
}
define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fsub float* %ptr, float %value seq_cst
ret float %res
}


@@ -0,0 +1,51 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=armv7-apple-ios7.0 -atomic-expand %s | FileCheck %s
define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] monotonic monotonic
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fadd float* %ptr, float %value seq_cst
ret float %res
}
define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] monotonic monotonic
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: call void @llvm.arm.dmb(i32 11)
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fsub float* %ptr, float %value seq_cst
ret float %res
}


@@ -0,0 +1,47 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=hexagon-- -atomic-expand %s | FileCheck %s
define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[LARX:%.*]] = call i32 @llvm.hexagon.L2.loadw.locked(i32* [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[LARX]] to float
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[TMP2]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[STCX:%.*]] = call i32 @llvm.hexagon.S2.storew.locked(i32* [[TMP3]], i32 [[TMP4]])
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[STCX]], 0
; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[TMP2]]
;
%res = atomicrmw fadd float* %ptr, float %value seq_cst
ret float %res
}
define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[LARX:%.*]] = call i32 @llvm.hexagon.L2.loadw.locked(i32* [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[LARX]] to float
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[TMP2]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[STCX:%.*]] = call i32 @llvm.hexagon.S2.storew.locked(i32* [[TMP3]], i32 [[TMP4]])
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[STCX]], 0
; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[TMP2]]
;
%res = atomicrmw fsub float* %ptr, float %value seq_cst
ret float %res
}


@@ -0,0 +1,2 @@
if not 'Hexagon' in config.root.targets:
config.unsupported = True


@@ -0,0 +1,51 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=mips64-mti-linux-gnu -atomic-expand %s | FileCheck %s
define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
; CHECK-NEXT: fence seq_cst
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] monotonic monotonic
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: fence seq_cst
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fadd float* %ptr, float %value seq_cst
ret float %res
}
define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
; CHECK-NEXT: fence seq_cst
; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] monotonic monotonic
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: fence seq_cst
; CHECK-NEXT: ret float [[TMP6]]
;
%res = atomicrmw fsub float* %ptr, float %value seq_cst
ret float %res
}


@@ -0,0 +1,2 @@
if not 'Mips' in config.root.targets:
config.unsupported = True


@@ -0,0 +1,59 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=riscv32-- -atomic-expand %s | FileCheck %s
define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fadd_f32(
; CHECK-NEXT: [[TMP1:%.*]] = alloca float, align 4
; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[PTR]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float* [[TMP1]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
; CHECK-NEXT: store float [[LOADED]], float* [[TMP1]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(i8* [[TMP3]], i8* [[TMP4]], i32 [[TMP5]], i32 5, i32 5)
; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP1]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
; CHECK-NEXT: [[TMP8:%.*]] = insertvalue { float, i1 } undef, float [[TMP7]], 0
; CHECK-NEXT: [[TMP9:%.*]] = insertvalue { float, i1 } [[TMP8]], i1 [[TMP6]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP9]], 1
; CHECK-NEXT: [[NEWLOADED]] = extractvalue { float, i1 } [[TMP9]], 0
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[NEWLOADED]]
;
%res = atomicrmw fadd float* %ptr, float %value seq_cst
ret float %res
}
define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
; CHECK-LABEL: @test_atomicrmw_fsub_f32(
; CHECK-NEXT: [[TMP1:%.*]] = alloca float, align 4
; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[PTR:%.*]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP2]], [[TMP0:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ]
; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast float* [[PTR]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast float* [[TMP1]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
; CHECK-NEXT: store float [[LOADED]], float* [[TMP1]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = bitcast float [[NEW]] to i32
; CHECK-NEXT: [[TMP6:%.*]] = call zeroext i1 @__atomic_compare_exchange_4(i8* [[TMP3]], i8* [[TMP4]], i32 [[TMP5]], i32 5, i32 5)
; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[TMP1]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
; CHECK-NEXT: [[TMP8:%.*]] = insertvalue { float, i1 } undef, float [[TMP7]], 0
; CHECK-NEXT: [[TMP9:%.*]] = insertvalue { float, i1 } [[TMP8]], i1 [[TMP6]], 1
; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { float, i1 } [[TMP9]], 1
; CHECK-NEXT: [[NEWLOADED]] = extractvalue { float, i1 } [[TMP9]], 0
; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret float [[NEWLOADED]]
;
%res = atomicrmw fsub float* %ptr, float %value seq_cst
ret float %res
}


@@ -0,0 +1,5 @@
config.suffixes = ['.ll']
targets = set(config.root.targets_to_build.split())
if not 'RISCV' in targets:
config.unsupported = True