
[WebAssembly] Support for atomic.wait / atomic.wake instructions

Summary:
This adds support for the atomic.wait / atomic.notify instructions (atomic.notify
was formerly called atomic.wake) from the wasm threads proposal.
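
As a rough illustration (a hypothetical snippet, not taken from this patch), the
new intrinsics are intended to be used from IR roughly like this; the return
codes and timeout semantics noted in the comments are those of the threads
proposal:

  declare i32 @llvm.wasm.atomic.wait.i32(i32*, i32, i64)
  declare i64 @llvm.wasm.atomic.notify(i32*, i64)

  define void @example(i32* %p, i32 %exp) {
    ; If *%p == %exp, block until another thread notifies on %p or the timeout
    ; (in nanoseconds; negative means wait forever) expires. The result is
    ; 0 (woken), 1 (*%p did not equal %exp), or 2 (timed out).
    %res = call i32 @llvm.wasm.atomic.wait.i32(i32* %p, i32 %exp, i64 -1)
    ; Wake at most one thread waiting on %p; returns the number of waiters woken.
    %woken = call i64 @llvm.wasm.atomic.notify(i32* %p, i64 1)
    ret void
  }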

Reviewers: dschuff

Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits

Differential Revision: https://reviews.llvm.org/D49395

llvm-svn: 338770
Heejin Ahn 2018-08-02 21:44:24 +00:00
parent 67370a2e75
commit 855cdeda11
7 changed files with 467 additions and 0 deletions


@@ -64,4 +64,27 @@ def int_wasm_landingpad_index: Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
// Returns LSDA address of the current function.
def int_wasm_lsda : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Atomic intrinsics
//===----------------------------------------------------------------------===//
// wait / notify
def int_wasm_atomic_wait_i32 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_atomic_wait_i64 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_atomic_notify:
Intrinsic<[llvm_i64_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i64_ty],
[IntrInaccessibleMemOnly, NoCapture<0>, IntrHasSideEffects], "",
[SDNPMemOperand]>;
}
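
For cross-reference (not part of the diff itself), these definitions surface in
IR as the following intrinsic declarations, matching the test file further down:

  declare i32 @llvm.wasm.atomic.wait.i32(i32*, i32, i64) ; i32 expected value
  declare i32 @llvm.wasm.atomic.wait.i64(i64*, i64, i64) ; i64 expected value
  declare i64 @llvm.wasm.atomic.notify(i32*, i64)        ; returns waiters woken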


@@ -253,6 +253,10 @@ inline unsigned GetDefaultP2Align(unsigned Opcode) {
case WebAssembly::ATOMIC_RMW_CMPXCHG_I32_S:
case WebAssembly::ATOMIC_RMW32_U_CMPXCHG_I64:
case WebAssembly::ATOMIC_RMW32_U_CMPXCHG_I64_S:
case WebAssembly::ATOMIC_NOTIFY:
case WebAssembly::ATOMIC_NOTIFY_S:
case WebAssembly::ATOMIC_WAIT_I32:
case WebAssembly::ATOMIC_WAIT_I32_S:
return 2;
case WebAssembly::LOAD_I64:
case WebAssembly::LOAD_I64_S:
@@ -280,6 +284,8 @@ inline unsigned GetDefaultP2Align(unsigned Opcode) {
case WebAssembly::ATOMIC_RMW_XCHG_I64_S:
case WebAssembly::ATOMIC_RMW_CMPXCHG_I64:
case WebAssembly::ATOMIC_RMW_CMPXCHG_I64_S:
case WebAssembly::ATOMIC_WAIT_I64:
case WebAssembly::ATOMIC_WAIT_I64_S:
return 3;
default:
llvm_unreachable("Only loads and stores have p2align values");


@@ -438,6 +438,46 @@ EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
return TargetLowering::getSetCCResultType(DL, C, VT);
}
bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const {
switch (Intrinsic) {
case Intrinsic::wasm_atomic_notify:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i32;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = 4;
// The atomic.notify instruction does not actually load from the memory
// specified by this argument, but a MachineMemOperand must be either a load
// or a store, so we mark it as a load here.
// FIXME: Volatile isn't really correct, but currently all LLVM atomic
// instructions are treated as volatile in the backend, so we do the same for
// consistency. The same applies to the wasm_atomic_wait intrinsics.
Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
return true;
case Intrinsic::wasm_atomic_wait_i32:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i32;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = 4;
Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
return true;
case Intrinsic::wasm_atomic_wait_i64:
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.memVT = MVT::i64;
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
Info.align = 8;
Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
return true;
default:
return false;
}
}
//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//


@@ -66,6 +66,9 @@ class WebAssemblyTargetLowering final : public TargetLowering {
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;


@@ -897,4 +897,130 @@ defm : TerRMWTruncExtPattern<
ATOMIC_RMW8_U_CMPXCHG_I32, ATOMIC_RMW16_U_CMPXCHG_I32,
ATOMIC_RMW8_U_CMPXCHG_I64, ATOMIC_RMW16_U_CMPXCHG_I64,
ATOMIC_RMW32_U_CMPXCHG_I64>;
}
//===----------------------------------------------------------------------===//
// Atomic wait / notify
//===----------------------------------------------------------------------===//
let Defs = [ARGUMENTS] in {
let hasSideEffects = 1 in {
defm ATOMIC_NOTIFY :
I<(outs I64:$dst),
(ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$count),
(outs), (ins P2Align:$p2align, offset32_op:$off), [],
"atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
"atomic.notify \t${off}, ${p2align}", 0xfe00>;
let mayLoad = 1 in {
defm ATOMIC_WAIT_I32 :
I<(outs I32:$dst),
(ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp, I64:$timeout),
(outs), (ins P2Align:$p2align, offset32_op:$off), [],
"i32.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
"i32.atomic.wait \t${off}, ${p2align}", 0xfe01>;
defm ATOMIC_WAIT_I64 :
I<(outs I32:$dst),
(ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp, I64:$timeout),
(outs), (ins P2Align:$p2align, offset32_op:$off), [],
"i64.atomic.wait \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
"i64.atomic.wait \t${off}, ${p2align}", 0xfe02>;
} // mayLoad = 1
} // hasSideEffects = 1
} // Defs = [ARGUMENTS]
let Predicates = [HasAtomics] in {
// Select notifies with no constant offset.
class NotifyPatNoOffset<Intrinsic kind> :
Pat<(i64 (kind I32:$addr, I64:$count)),
(ATOMIC_NOTIFY 0, 0, I32:$addr, I64:$count)>;
def : NotifyPatNoOffset<int_wasm_atomic_notify>;
// Select notifies with a constant offset.
// Pattern with address + immediate offset
class NotifyPatImmOff<Intrinsic kind, PatFrag operand> :
Pat<(i64 (kind (operand I32:$addr, imm:$off), I64:$count)),
(ATOMIC_NOTIFY 0, imm:$off, I32:$addr, I64:$count)>;
def : NotifyPatImmOff<int_wasm_atomic_notify, regPlusImm>;
def : NotifyPatImmOff<int_wasm_atomic_notify, or_is_add>;
class NotifyPatGlobalAddr<Intrinsic kind> :
Pat<(i64 (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)),
I64:$count)),
(ATOMIC_NOTIFY 0, tglobaladdr:$off, I32:$addr, I64:$count)>;
def : NotifyPatGlobalAddr<int_wasm_atomic_notify>;
class NotifyPatExternalSym<Intrinsic kind> :
Pat<(i64 (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)),
I64:$count)),
(ATOMIC_NOTIFY 0, texternalsym:$off, I32:$addr, I64:$count)>;
def : NotifyPatExternalSym<int_wasm_atomic_notify>;
// Select notifies with just a constant offset.
class NotifyPatOffsetOnly<Intrinsic kind> :
Pat<(i64 (kind imm:$off, I64:$count)),
(ATOMIC_NOTIFY 0, imm:$off, (CONST_I32 0), I64:$count)>;
def : NotifyPatOffsetOnly<int_wasm_atomic_notify>;
class NotifyPatGlobalAddrOffOnly<Intrinsic kind> :
Pat<(i64 (kind (WebAssemblywrapper tglobaladdr:$off), I64:$count)),
(ATOMIC_NOTIFY 0, tglobaladdr:$off, (CONST_I32 0), I64:$count)>;
def : NotifyPatGlobalAddrOffOnly<int_wasm_atomic_notify>;
class NotifyPatExternSymOffOnly<Intrinsic kind> :
Pat<(i64 (kind (WebAssemblywrapper texternalsym:$off), I64:$count)),
(ATOMIC_NOTIFY 0, texternalsym:$off, (CONST_I32 0), I64:$count)>;
def : NotifyPatExternSymOffOnly<int_wasm_atomic_notify>;
// Select waits with no constant offset.
class WaitPatNoOffset<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)),
(inst 0, 0, I32:$addr, ty:$exp, I64:$timeout)>;
def : WaitPatNoOffset<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatNoOffset<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
// Select waits with a constant offset.
// Pattern with address + immediate offset
class WaitPatImmOff<ValueType ty, Intrinsic kind, PatFrag operand, NI inst> :
Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)),
(inst 0, imm:$off, I32:$addr, ty:$exp, I64:$timeout)>;
def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, regPlusImm, ATOMIC_WAIT_I32>;
def : WaitPatImmOff<i32, int_wasm_atomic_wait_i32, or_is_add, ATOMIC_WAIT_I32>;
def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, regPlusImm, ATOMIC_WAIT_I64>;
def : WaitPatImmOff<i64, int_wasm_atomic_wait_i64, or_is_add, ATOMIC_WAIT_I64>;
class WaitPatGlobalAddr<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)),
ty:$exp, I64:$timeout)),
(inst 0, tglobaladdr:$off, I32:$addr, ty:$exp, I64:$timeout)>;
def : WaitPatGlobalAddr<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatGlobalAddr<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
class WaitPatExternalSym<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)),
ty:$exp, I64:$timeout)),
(inst 0, texternalsym:$off, I32:$addr, ty:$exp, I64:$timeout)>;
def : WaitPatExternalSym<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatExternalSym<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
// Select waits with just a constant offset.
class WaitPatOffsetOnly<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)),
(inst 0, imm:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>;
def : WaitPatOffsetOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatOffsetOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
class WaitPatGlobalAddrOffOnly<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, I64:$timeout)),
(inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>;
def : WaitPatGlobalAddrOffOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatGlobalAddrOffOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
class WaitPatExternSymOffOnly<ValueType ty, Intrinsic kind, NI inst> :
Pat<(i32 (kind (WebAssemblywrapper texternalsym:$off), ty:$exp,
I64:$timeout)),
(inst 0, texternalsym:$off, (CONST_I32 0), ty:$exp, I64:$timeout)>;
def : WaitPatExternSymOffOnly<i32, int_wasm_atomic_wait_i32, ATOMIC_WAIT_I32>;
def : WaitPatExternSymOffOnly<i64, int_wasm_atomic_wait_i64, ATOMIC_WAIT_I64>;
} // Predicates = [HasAtomics]


@@ -156,6 +156,9 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
case WebAssembly::ATOMIC_RMW_XOR_I64:
case WebAssembly::ATOMIC_RMW_XCHG_I64:
case WebAssembly::ATOMIC_RMW_CMPXCHG_I64:
case WebAssembly::ATOMIC_NOTIFY:
case WebAssembly::ATOMIC_WAIT_I32:
case WebAssembly::ATOMIC_WAIT_I64:
RewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo);
break;
case WebAssembly::STORE_I32:


@@ -1525,3 +1525,269 @@ define i32 @cmpxchg_i8_i32_z_from_global_address(i32 %exp, i32 %new) {
%u = zext i8 %old to i32
ret i32 %u
}
;===----------------------------------------------------------------------------
; Waits: 32-bit
;===----------------------------------------------------------------------------
declare i32 @llvm.wasm.atomic.wait.i32(i32*, i32, i64)
; Basic wait.
; CHECK-LABEL: wait_i32_no_offset:
; CHECK: i32.atomic.wait $push0=, 0($0), $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @wait_i32_no_offset(i32* %p, i32 %exp, i64 %timeout) {
%v = call i32 @llvm.wasm.atomic.wait.i32(i32* %p, i32 %exp, i64 %timeout)
ret i32 %v
}
; With an nuw add, we can fold an offset.
; CHECK-LABEL: wait_i32_with_folded_offset:
; CHECK: i32.atomic.wait $push0=, 24($0), $1, $2{{$}}
define i32 @wait_i32_with_folded_offset(i32* %p, i32 %exp, i64 %timeout) {
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; With an inbounds gep, we can fold an offset.
; CHECK-LABEL: wait_i32_with_folded_gep_offset:
; CHECK: i32.atomic.wait $push0=, 24($0), $1, $2{{$}}
define i32 @wait_i32_with_folded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
%s = getelementptr inbounds i32, i32* %p, i32 6
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; We can't fold a negative offset though, even with an inbounds gep.
; CHECK-LABEL: wait_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i32_with_unfolded_gep_negative_offset(i32* %p, i32 %exp, i64 %timeout) {
%s = getelementptr inbounds i32, i32* %p, i32 -6
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; Without nuw, and even with nsw, we can't fold an offset.
; CHECK-LABEL: wait_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i32_with_unfolded_offset(i32* %p, i32 %exp, i64 %timeout) {
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; Without inbounds, we can't fold a gep offset.
; CHECK-LABEL: wait_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i32_with_unfolded_gep_offset(i32* %p, i32 %exp, i64 %timeout) {
%s = getelementptr i32, i32* %p, i32 6
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; When waiting from a fixed address, materialize a zero.
; CHECK-LABEL: wait_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.wait $push1=, 42($pop0), $0, $1{{$}}
define i32 @wait_i32_from_numeric_address(i32 %exp, i64 %timeout) {
%s = inttoptr i32 42 to i32*
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* %s, i32 %exp, i64 %timeout)
ret i32 %t
}
; CHECK-LABEL: wait_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.wait $push1=, gv($pop0), $0, $1{{$}}
define i32 @wait_i32_from_global_address(i32 %exp, i64 %timeout) {
%t = call i32 @llvm.wasm.atomic.wait.i32(i32* @gv, i32 %exp, i64 %timeout)
ret i32 %t
}
;===----------------------------------------------------------------------------
; Waits: 64-bit
;===----------------------------------------------------------------------------
declare i32 @llvm.wasm.atomic.wait.i64(i64*, i64, i64)
; Basic wait.
; CHECK-LABEL: wait_i64_no_offset:
; CHECK: i64.atomic.wait $push0=, 0($0), $1, $2{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @wait_i64_no_offset(i64* %p, i64 %exp, i64 %timeout) {
%v = call i32 @llvm.wasm.atomic.wait.i64(i64* %p, i64 %exp, i64 %timeout)
ret i32 %v
}
; With an nuw add, we can fold an offset.
; CHECK-LABEL: wait_i64_with_folded_offset:
; CHECK: i64.atomic.wait $push0=, 24($0), $1, $2{{$}}
define i32 @wait_i64_with_folded_offset(i64* %p, i64 %exp, i64 %timeout) {
%q = ptrtoint i64* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i64*
%t = call i32 @llvm.wasm.atomic.wait.i64(i64* %s, i64 %exp, i64 %timeout)
ret i32 %t
}
; With an inbounds gep, we can fold an offset.
; CHECK-LABEL: wait_i64_with_folded_gep_offset:
; CHECK: i64.atomic.wait $push0=, 24($0), $1, $2{{$}}
define i32 @wait_i64_with_folded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
%s = getelementptr inbounds i64, i64* %p, i32 3
%t = call i32 @llvm.wasm.atomic.wait.i64(i64* %s, i64 %exp, i64 %timeout)
ret i32 %t
}
; We can't fold a negative offset though, even with an inbounds gep.
; CHECK-LABEL: wait_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i64_with_unfolded_gep_negative_offset(i64* %p, i64 %exp, i64 %timeout) {
%s = getelementptr inbounds i64, i64* %p, i32 -3
%t = call i32 @llvm.wasm.atomic.wait.i64(i64* %s, i64 %exp, i64 %timeout)
ret i32 %t
}
; Without nuw, and even with nsw, we can't fold an offset.
; CHECK-LABEL: wait_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i64_with_unfolded_offset(i64* %p, i64 %exp, i64 %timeout) {
%q = ptrtoint i64* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i64*
%t = call i32 @llvm.wasm.atomic.wait.i64(i64* %s, i64 %exp, i64 %timeout)
ret i32 %t
}
; Without inbounds, we can't fold a gep offset.
; CHECK-LABEL: wait_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.wait $push2=, 0($pop1), $1, $2{{$}}
define i32 @wait_i64_with_unfolded_gep_offset(i64* %p, i64 %exp, i64 %timeout) {
%s = getelementptr i64, i64* %p, i32 3
%t = call i32 @llvm.wasm.atomic.wait.i64(i64* %s, i64 %exp, i64 %timeout)
ret i32 %t
}
;===----------------------------------------------------------------------------
; Notifies
;===----------------------------------------------------------------------------
declare i64 @llvm.wasm.atomic.notify(i32*, i64)
; Basic notify.
; CHECK-LABEL: notify_no_offset:
; CHECK: atomic.notify $push0=, 0($0), $1{{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @notify_no_offset(i32* %p, i64 %notify_count) {
%v = call i64 @llvm.wasm.atomic.notify(i32* %p, i64 %notify_count)
ret i64 %v
}
; With an nuw add, we can fold an offset.
; CHECK-LABEL: notify_with_folded_offset:
; CHECK: atomic.notify $push0=, 24($0), $1{{$}}
define i64 @notify_with_folded_offset(i32* %p, i64 %notify_count) {
%q = ptrtoint i32* %p to i32
%r = add nuw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; With an inbounds gep, we can fold an offset.
; CHECK-LABEL: notify_with_folded_gep_offset:
; CHECK: atomic.notify $push0=, 24($0), $1{{$}}
define i64 @notify_with_folded_gep_offset(i32* %p, i64 %notify_count) {
%s = getelementptr inbounds i32, i32* %p, i32 6
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; We can't fold a negative offset though, even with an inbounds gep.
; CHECK-LABEL: notify_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: atomic.notify $push2=, 0($pop1), $1{{$}}
define i64 @notify_with_unfolded_gep_negative_offset(i32* %p, i64 %notify_count) {
%s = getelementptr inbounds i32, i32* %p, i32 -6
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; Without nuw, and even with nsw, we can't fold an offset.
; CHECK-LABEL: notify_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: atomic.notify $push2=, 0($pop1), $1{{$}}
define i64 @notify_with_unfolded_offset(i32* %p, i64 %notify_count) {
%q = ptrtoint i32* %p to i32
%r = add nsw i32 %q, 24
%s = inttoptr i32 %r to i32*
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; Without inbounds, we can't fold a gep offset.
; CHECK-LABEL: notify_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: atomic.notify $push2=, 0($pop1), $1{{$}}
define i64 @notify_with_unfolded_gep_offset(i32* %p, i64 %notify_count) {
%s = getelementptr i32, i32* %p, i32 6
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; When notifying from a fixed address, materialize a zero.
; CHECK-LABEL: notify_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: atomic.notify $push1=, 42($pop0), $0{{$}}
define i64 @notify_from_numeric_address(i64 %notify_count) {
%s = inttoptr i32 42 to i32*
%t = call i64 @llvm.wasm.atomic.notify(i32* %s, i64 %notify_count)
ret i64 %t
}
; CHECK-LABEL: notify_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: atomic.notify $push1=, gv($pop0), $0{{$}}
define i64 @notify_from_global_address(i64 %notify_count) {
%t = call i64 @llvm.wasm.atomic.notify(i32* @gv, i64 %notify_count)
ret i64 %t
}