Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 12:41:49 +01:00)
[BasicAA] Teach the analysis about atomic memcpy
Summary: A simple change to derive mod/ref info from the atomic memcpy intrinsic in the same way as from the regular memcpy intrinsic.

llvm-svn: 333454
This commit is contained in:
parent edc5a4d040
commit 6a4ca59d1b
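For context, a minimal sketch of the pattern the patch relies on (hedged: this is not the LLVM source itself, and the helper name is made up for illustration). The key fact is that isa<>/dyn_cast<> against AnyMemCpyInst matches both the plain @llvm.memcpy intrinsic and the @llvm.memcpy.element.unordered.atomic intrinsic, so widening the cast in BasicAAResult::getModRefInfo lets the existing source/destination no-alias reasoning apply to the atomic variant as well.

// Sketch only: isAnyFormOfMemCpy is a hypothetical helper, not an LLVM API.
// It shows that one dyn_cast/isa check against AnyMemCpyInst covers both the
// plain and the element-wise atomic memcpy intrinsics.
#include "llvm/IR/IntrinsicInst.h"

static bool isAnyFormOfMemCpy(const llvm::Instruction *I) {
  // True for calls to @llvm.memcpy.* as well as
  // @llvm.memcpy.element.unordered.atomic.*.
  return llvm::isa<llvm::AnyMemCpyInst>(I);
}

With that in hand, the diff below only needs to widen the dyn_cast; the existing alias queries against the memcpy's source and destination locations do the rest.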
@@ -888,7 +888,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   // operands, i.e., source and destination of any given memcpy must no-alias.
   // If Loc must-aliases either one of these two locations, then it necessarily
   // no-aliases the other.
-  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
+  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
     AliasResult SrcAA, DestAA;

     if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
@@ -4,6 +4,7 @@ target triple = "arm-apple-ios"

 declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #0
 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0
+declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) #0

 declare void @a_readonly_func(i8*) #1
 declare void @a_writeonly_func(i8*) #2
@@ -24,6 +25,22 @@ define void @test2(i8* %P, i8* %Q) #3 {
 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
 }

+define void @test2_atomic(i8* %P, i8* %Q) #3 {
+  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+  ret void
+
+; CHECK-LABEL: Function: test2_atomic:
+
+; CHECK: MayAlias: i8* %P, i8* %Q
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+; CHECK: Just Mod: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1) <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+; CHECK: Just Mod: tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1) <-> tail call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %P, i8* align 1 %Q, i64 12, i32 1)
+}
+
 define void @test2a(i8* noalias %P, i8* noalias %Q) #3 {
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)