; RUN: llvm-dis < %s.bc | FileCheck %s

; memInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
; The test checks that LLVM does not misread memory-related instructions from
; older bitcode files.
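
; Each function below exercises one family of memory instructions; llvm-dis is
; run on the 3.2-era bitcode and the CHECK lines verify the disassembled text.

; @alloca: allocas with and without an explicit element count (i32 2) and alignment.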
define void @alloca(){
entry:
; CHECK: %res1 = alloca i8
%res1 = alloca i8
; CHECK-NEXT: %res2 = alloca i8, i32 2
%res2 = alloca i8, i32 2
; CHECK-NEXT: %res3 = alloca i8, i32 2, align 4
%res3 = alloca i8, i32 2, align 4
; CHECK-NEXT: %res4 = alloca i8, align 4
%res4 = alloca i8, align 4
ret void
}
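
; @load: plain and volatile loads with optional alignment, !nontemporal (!0)
; and !invariant.load (!1) metadata, in every combination.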
define void @load(){
entry:
%ptr1 = alloca i8
store i8 2, i8* %ptr1
; CHECK: %res1 = load i8* %ptr1
%res1 = load i8* %ptr1
; CHECK-NEXT: %res2 = load volatile i8* %ptr1
%res2 = load volatile i8* %ptr1
; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
%res3 = load i8* %ptr1, align 1
; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
%res4 = load volatile i8* %ptr1, align 1
; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
%res5 = load i8* %ptr1, !nontemporal !0
; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
%res6 = load volatile i8* %ptr1, !nontemporal !0
; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
%res7 = load i8* %ptr1, align 1, !nontemporal !0
; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
%res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
%res9 = load i8* %ptr1, !invariant.load !1
; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
%res10 = load volatile i8* %ptr1, !invariant.load !1
; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
%res11 = load i8* %ptr1, align 1, !invariant.load !1
; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
%res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
; CHECK-NEXT: %res13 = load i8* %ptr1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
%res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
%res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
%res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
%res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
ret void
}
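
; @loadAtomic: atomic loads for each legal load ordering (unordered, monotonic,
; acquire, seq_cst), plain and volatile, with and without the singlethread scope.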
define void @loadAtomic(){
entry:
%ptr1 = alloca i8
store i8 2, i8* %ptr1
; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
%res1 = load atomic i8* %ptr1 unordered, align 1
; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
%res2 = load atomic i8* %ptr1 monotonic, align 1
; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
%res3 = load atomic i8* %ptr1 acquire, align 1
; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
%res4 = load atomic i8* %ptr1 seq_cst, align 1
; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
%res5 = load atomic volatile i8* %ptr1 unordered, align 1
; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
%res6 = load atomic volatile i8* %ptr1 monotonic, align 1
; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
%res7 = load atomic volatile i8* %ptr1 acquire, align 1
; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
%res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
%res9 = load atomic i8* %ptr1 singlethread unordered, align 1
; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
%res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
%res11 = load atomic i8* %ptr1 singlethread acquire, align 1
; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
%res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
%res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
%res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
%res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
%res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
ret void
}
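
; @store: plain and volatile stores with optional alignment and !nontemporal
; metadata, mirroring the non-atomic load cases above.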
define void @store(){
entry:
%ptr1 = alloca i8
; CHECK: store i8 2, i8* %ptr1
store i8 2, i8* %ptr1
; CHECK-NEXT: store volatile i8 2, i8* %ptr1
store volatile i8 2, i8* %ptr1
; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
store i8 2, i8* %ptr1, align 1
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
store volatile i8 2, i8* %ptr1, align 1
; CHECK-NEXT: store i8 2, i8* %ptr1, !nontemporal !0
store i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, !nontemporal !0
store volatile i8 2, i8* %ptr1, !nontemporal !0
; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
store i8 2, i8* %ptr1, align 1, !nontemporal !0
; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
ret void
}
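
; @storeAtomic: atomic stores for each legal store ordering (unordered, monotonic,
; release, seq_cst), plain and volatile, with and without the singlethread scope.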
define void @storeAtomic(){
entry:
%ptr1 = alloca i8
; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
store atomic i8 2, i8* %ptr1 unordered, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
store atomic i8 2, i8* %ptr1 monotonic, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
store atomic i8 2, i8* %ptr1 release, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
store atomic i8 2, i8* %ptr1 seq_cst, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
store atomic volatile i8 2, i8* %ptr1 unordered, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
store atomic volatile i8 2, i8* %ptr1 release, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
store atomic i8 2, i8* %ptr1 singlethread release, align 1
; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
ret void
}
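
; @cmpxchg: compare-and-exchange with every ordering (monotonic, acquire,
; release, acq_rel, seq_cst), plain and volatile, with and without the
; singlethread scope.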
define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
entry:
; cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
%res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
%res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
%res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
%res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
%res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
%res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
%res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
%res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
%res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
%res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
%res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
%res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
%res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
%res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
%res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
%res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
%res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
%res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
%res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
%res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
ret void
}
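
; @getelementptr: struct GEP, inbounds struct GEP, and a vector-of-pointers GEP.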
define void @getelementptr({i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets){
entry:
; CHECK: %res1 = getelementptr { i8, i8 }* %s, i32 1, i32 1
%res1 = getelementptr {i8, i8}* %s, i32 1, i32 1
; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }* %s, i32 1, i32 1
%res2 = getelementptr inbounds {i8, i8}* %s, i32 1, i32 1
; CHECK-NEXT: %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
%res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
ret void
}
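
; Metadata nodes referenced above: !0 by !nontemporal, !1 by !invariant.load.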
!0 = metadata !{ i32 1 }
!1 = metadata !{}