Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22 10:42:39 +01:00
[NFC][Coroutines] Autogenerate a few tests for ease of further updates
This commit is contained in:
parent 7095312ff1
commit 07d20f9eb2
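The NOTE lines below record that these CHECK blocks were produced by utils/update_test_checks.py rather than written by hand. As a rough sketch of that workflow (the test file name and the opt binary path here are placeholders, not part of this commit), the assertions can be regenerated after a pass change with something like:

    # Rebuild the FileCheck assertions for one coroutine test; the script runs the
    # test's RUN lines through opt and rewrites the CHECK lines in place.
    python llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        llvm/test/Transforms/Coroutines/coro-retcon.ll

Re-running the script after future changes keeps the checks in sync, which is the "ease of further updates" the commit title refers to.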
@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck %s
@@ -5,6 +6,18 @@ target datalayout = "p:64:64:64"

declare {i8*, i8*, i32} @prototype_f(i8*, i1)
define {i8*, i8*, i32} @f(i8* %buffer, i32 %n) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = getelementptr inbounds i8, i8* [[BUFFER:%.*]], i64 8
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[N_VAL_SPILL_ADDR]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = tail call i8* @allocate(i32 [[N]])
; CHECK-NEXT: [[DOTSPILL_ADDR:%.*]] = bitcast i8* [[BUFFER]] to i8**
; CHECK-NEXT: store i8* [[TMP1]], i8** [[DOTSPILL_ADDR]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i8*, i8*, i32 } { i8* bitcast ({ i8*, i8*, i32 } (i8*, i1)* @f.resume.0 to i8*), i8* undef, i32 undef }, i8* [[TMP1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i8*, i32 } [[TMP2]], i32 [[N]], 2
; CHECK-NEXT: ret { i8*, i8*, i32 } [[TMP3]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i8*, i32} (i8*, i1)* @prototype_f to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -27,28 +40,20 @@ cleanup:
unreachable
}

; CHECK-LABEL: define { i8*, i8*, i32 } @f(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8, i8* %buffer, i64 8
; CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to i32*
; CHECK-NEXT: store i32 %n, i32* [[T1]], align 4
; CHECK-NEXT: [[ALLOC:%.*]] = tail call i8* @allocate(i32 %n)
; CHECK-NEXT: [[T1:%.*]] = bitcast i8* %buffer to i8**
; CHECK-NEXT: store i8* [[ALLOC]], i8** [[T1]], align 8
; CHECK-NEXT: [[T0:%.*]] = insertvalue { i8*, i8*, i32 } { i8* bitcast ({ i8*, i8*, i32 } (i8*, i1)* @f.resume.0 to i8*), i8* undef, i32 undef }, i8* [[ALLOC]], 1
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i8*, i32 } [[T0]], i32 %n, 2
; CHECK-NEXT: ret { i8*, i8*, i32 } [[RET]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal { i8*, i8*, i32 } @f.resume.0(i8* {{.*}} %0, i1 %1)
; CHECK-NEXT: :
; CHECK-NEXT: [[T1:%.*]] = bitcast i8* %0 to i8**
; CHECK-NEXT: [[ALLOC:%.*]] = load i8*, i8** [[T1]], align 8
; CHECK-NEXT: tail call void @deallocate(i8* [[ALLOC]])
; CHECK-NEXT: br i1 %1,

declare {i8*, i32} @prototype_g(i8*, i1)
define {i8*, i32} @g(i8* %buffer, i32 %n) {
; CHECK-LABEL: @g(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = alloca i8, i64 [[TMP0]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @g.resume.0 to i8*), i32 undef }, i32 [[N]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP2]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i32} (i8*, i1)* @prototype_g to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -72,35 +77,17 @@ cleanup:
unreachable
}

; CHECK-LABEL: define { i8*, i32 } @g(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: [[T0:%.*]] = zext i32 %n to i64
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i8, i64 [[T0]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[ALLOC]])
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @g.resume.0 to i8*), i32 undef }, i32 %n, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal { i8*, i32 } @g.resume.0(i8* {{.*}} %0, i1 %1)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 8
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 8
; CHECK-NEXT: [[T0:%.*]] = zext i32 %inc to i64
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i8, i64 [[T0]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[ALLOC]])
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @g.resume.0 to i8*), i32 undef }, i32 %inc, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK: :
; CHECK-NEXT: ret { i8*, i32 } { i8* null, i32 undef }

declare {i8*, i32} @prototype_h(i8*, i1)
define {i8*, i32} @h(i8* %buffer, i32 %n) {
; CHECK-LABEL: @h(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @h.resume.0 to i8*), i32 undef }, i32 [[N]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP0]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i32} (i8*, i1)* @prototype_h to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -124,32 +111,17 @@ cleanup:
unreachable
}

; CHECK-LABEL: define { i8*, i32 } @h(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @h.resume.0 to i8*), i32 undef }, i32 %n, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal { i8*, i32 } @h.resume.0(i8* {{.*}} %0, i1 %1)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[NSLOT:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[NSLOT]], align 8
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: [[T0:%.*]] = zext i32 %inc to i64
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i8, i64 [[T0]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[ALLOC]])
; CHECK-NEXT: store i32 %inc, i32* [[NSLOT]], align 8
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @h.resume.0 to i8*), i32 undef }, i32 %inc, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK: :
; CHECK-NEXT: ret { i8*, i32 } { i8* null, i32 undef }

declare {i8*, i32} @prototype_i(i8*)
define {i8*, i32} @i(i8* %buffer, i32 %n) {
; CHECK-LABEL: @i(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @i.resume.0 to i8*), i32 undef }, i32 [[N]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP0]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i32} (i8*)* @prototype_i to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -172,38 +144,17 @@ loop2:
br i1 %cmp, label %loop2, label %loop
}

; CHECK-LABEL: define { i8*, i32 } @i(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @i.resume.0 to i8*), i32 undef }, i32 %n, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal { i8*, i32 } @i.resume.0(i8* {{.*}} %0)
; CHECK-NEXT: :
; CHECK-NEXT: [[NSLOT:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[NSLOT]], align 8
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: br label %loop2
; CHECK: :
; CHECK-NEXT: store i32 %k, i32* [[NSLOT]], align 8
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @i.resume.0 to i8*), i32 undef }, i32 %k, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK: loop2:
; CHECK-NEXT: %k = phi i32 [ %inc, {{.*}} ], [ %k2, %loop2 ]
; CHECK-NEXT: [[SAVE:%.*]] = call i8* @llvm.stacksave()
; CHECK-NEXT: [[T0:%.*]] = zext i32 %k to i64
; CHECK-NEXT: [[ALLOC:%.*]] = alloca i8, i64 [[T0]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[ALLOC]])
; CHECK-NEXT: call void @llvm.stackrestore(i8* [[SAVE]])
; CHECK-NEXT: %k2 = lshr i32 %k, 1
; CHECK-NEXT: %cmp = icmp ugt i32 %k, 128
; CHECK-NEXT: br i1 %cmp, label %loop2,
; CHECK-NEXT: }

declare {i8*, i32} @prototype_j(i8*)
define {i8*, i32} @j(i8* %buffer, i32 %n) {
; CHECK-LABEL: @j(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*)* @j.resume.0 to i8*), i32 undef }, i32 [[N]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP0]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i32} (i8*)* @prototype_j to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -232,6 +183,19 @@ end:

declare i32 @getSize()
define {i8*, i32} @k(i8* %buffer, i32 %n, i1 %cond) {
; CHECK-LABEL: @k(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SIZE:%.*]] = tail call i32 @getSize()
; CHECK-NEXT: br i1 [[COND:%.*]], label [[ALLOCA_BLOCK:%.*]], label [[CORO_RETURN:%.*]]
; CHECK: coro.return:
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i1)* @k.resume.0 to i8*), i32 undef }, i32 [[N:%.*]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP0]]
; CHECK: alloca_block:
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[SIZE]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
; CHECK-NEXT: call void @use(i8* nonnull [[TMP2]])
; CHECK-NEXT: br label [[CORO_RETURN]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 1024, i32 8, i8* %buffer, i8* bitcast ({i8*, i32} (i8*, i1)* @prototype_g to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck %s
@@ -5,6 +6,19 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.12.0"

define {i8*, i32} @f(i8* %buffer, i32* %array) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAY_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32**
; CHECK-NEXT: store i32* [[ARRAY:%.*]], i32** [[ARRAY_SPILL_ADDR]], align 8
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAY]], align 4
; CHECK-NEXT: [[LOAD_POS:%.*]] = icmp sgt i32 [[LOAD]], 0
; CHECK-NEXT: [[TMP0:%.*]] = select i1 [[LOAD_POS]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[LOAD_POS]], i32 [[LOAD]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = bitcast void (i8*, i1)* [[TMP0]] to i8*
; CHECK-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i32 } undef, i8* [[TMP2]], 0
; CHECK-NEXT: [[TMP4:%.*]] = insertvalue { i8*, i32 } [[TMP3]], i32 [[TMP1]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP4]]
;
entry:
%id = call token @llvm.coro.id.retcon.once(i32 8, i32 8, i8* %buffer, i8* bitcast (void (i8*, i1)* @prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -33,45 +47,23 @@ cleanup:
unreachable
}

; CHECK-LABEL: define { i8*, i32 } @f(i8* %buffer, i32* %array)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32**
; CHECK-NEXT: store i32* %array, i32** [[T0]], align 8
; CHECK-NEXT: %load = load i32, i32* %array, align 4
; CHECK-NEXT: %load.pos = icmp sgt i32 %load, 0
; CHECK-NEXT: [[CONT:%.*]] = select i1 %load.pos, void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
; CHECK-NEXT: [[VAL:%.*]] = select i1 %load.pos, i32 %load, i32 0
; CHECK-NEXT: [[CONT_CAST:%.*]] = bitcast void (i8*, i1)* [[CONT]] to i8*
; CHECK-NEXT: [[T0:%.*]] = insertvalue { i8*, i32 } undef, i8* [[CONT_CAST]], 0
; CHECK-NEXT: [[T1:%.*]] = insertvalue { i8*, i32 } [[T0]], i32 [[VAL]], 1
; CHECK-NEXT: ret { i8*, i32 } [[T1]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal void @f.resume.0(i8* {{.*}} %0, i1 zeroext %1)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32**
; CHECK-NEXT: [[RELOAD:%.*]] = load i32*, i32** [[T0]], align 8
; CHECK-NEXT: store i32 0, i32* [[RELOAD]], align 4
; CHECK-NEXT: br label
; CHECK: :
; CHECK-NEXT: ret void
; CHECK-NEXT: }

; CHECK-LABEL: define internal void @f.resume.1(i8* {{.*}} %0, i1 zeroext %1)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32**
; CHECK-NEXT: [[RELOAD:%.*]] = load i32*, i32** [[T0]], align 8
; CHECK-NEXT: store i32 10, i32* [[RELOAD]], align 4
; CHECK-NEXT: br label
; CHECK: :
; CHECK-NEXT: ret void
; CHECK-NEXT: }

define void @test(i32* %array) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca i32*, align 8
; CHECK-NEXT: [[DOTSUB:%.*]] = bitcast i32** [[TMP0]] to i8*
; CHECK-NEXT: store i32* [[ARRAY:%.*]], i32** [[TMP0]], align 8
; CHECK-NEXT: [[LOAD_I:%.*]] = load i32, i32* [[ARRAY]], align 4
; CHECK-NEXT: [[LOAD_POS_I:%.*]] = icmp sgt i32 [[LOAD_I]], 0
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[LOAD_POS_I]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[LOAD_POS_I]], i32 [[LOAD_I]], i32 0
; CHECK-NEXT: call void @print(i32 [[TMP2]])
; CHECK-NEXT: call void [[TMP1]](i8* nonnull [[DOTSUB]], i1 zeroext false)
; CHECK-NEXT: ret void
;
entry:
%0 = alloca [8 x i8], align 8
%buffer = bitcast [8 x i8]* %0 to i8*
@@ -88,18 +80,6 @@ entry:

; Unfortunately, we don't seem to fully optimize this right now due
; to some sort of phase-ordering thing.
; CHECK-LABEL: define void @test(i32* %array)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BUFFER:%.*]] = alloca i32*, align 8
; CHECK-NEXT: [[BUFFER_CAST:%.*]] = bitcast i32** [[BUFFER]] to i8*
; CHECK-NEXT: store i32* %array, i32** [[BUFFER]], align 8
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* %array, align 4
; CHECK-NEXT: [[LOAD_POS:%.*]] = icmp sgt i32 [[LOAD]], 0
; CHECK-NEXT: [[CONT:%.*]] = select i1 [[LOAD_POS]], void (i8*, i1)* @f.resume.0, void (i8*, i1)* @f.resume.1
; CHECK-NEXT: [[VAL:%.*]] = select i1 [[LOAD_POS]], i32 [[LOAD]], i32 0
; CHECK-NEXT: call void @print(i32 [[VAL]])
; CHECK-NEXT: call void [[CONT]](i8* nonnull [[BUFFER_CAST]], i1 zeroext false)
; CHECK-NEXT: ret void

declare token @llvm.coro.id.retcon.once(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
@@ -1,7 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -aa-pipeline=default -S | FileCheck %s

define i8* @f(i8* %buffer, i32 %n) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i32, i1)* @f.resume.0 to i8*)
;
entry:
%id = call token @llvm.coro.id.retcon(i32 8, i32 4, i8* %buffer, i8* bitcast (i8* (i8*, i32, i1)* @prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -24,28 +31,18 @@ cleanup:
unreachable
}

; CHECK-LABEL: define i8* @f(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i32, i1)* @f.resume.0 to i8*)
; CHECK-NEXT: }

; CHECK-LABEL: define internal i8* @f.resume.0(i8* {{.*}} %0, i32 %1, i1 zeroext %2)
; CHECK-NEXT: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4
; CHECK-NEXT: br i1 %2,
; CHECK: :
; CHECK-NEXT: %sum = add i32 [[T1]], %1
; CHECK-NEXT: store i32 %sum, i32* [[T0]], align 4
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i32, i1)* @f.resume.0 to i8*)
; CHECK: :
; CHECK-NEXT: call void @print(i32 [[T1]])
; CHECK-NEXT: ret i8* null
; CHECK-NEXT: }

define i32 @main() {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca [8 x i8], align 4
; CHECK-NEXT: [[DOTSUB:%.*]] = getelementptr inbounds [8 x i8], [8 x i8]* [[TMP0]], i64 0, i64 0
; CHECK-NEXT: [[N_VAL_RELOAD_ADDR_I1:%.*]] = bitcast [8 x i8]* [[TMP0]] to i32*
; CHECK-NEXT: store i32 7, i32* [[N_VAL_RELOAD_ADDR_I1]], align 4, !alias.scope !0
; CHECK-NEXT: call void @print(i32 7), !noalias !3
; CHECK-NEXT: ret i32 0
;
entry:
%0 = alloca [8 x i8], align 4
%buffer = bitcast [8 x i8]* %0 to i8*
@@ -63,13 +60,6 @@ entry:

; Unfortunately, we don't seem to fully optimize this right now due
; to some sort of phase-ordering thing.
; CHECK-LABEL: define i32 @main
; CHECK-NEXT: entry:
; CHECK: [[BUFFER:%.*]] = alloca [8 x i8], align 4
; CHECK: [[SLOT:%.*]] = bitcast [8 x i8]* [[BUFFER]] to i32*
; CHECK-NEXT: store i32 7, i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 7)
; CHECK-NEXT: ret i32 0

declare token @llvm.coro.id.retcon(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
@@ -1,8 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; First example from Doc/Coroutines.rst (two block loop) converted to retcon
; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck %s

define {i8*, i32} @f(i8* %buffer, i32 %n) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i8)* @f.resume.0 to i8*), i32 undef }, i32 [[N]], 1
; CHECK-NEXT: ret { i8*, i32 } [[TMP0]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 8, i32 4, i8* %buffer, i8* bitcast ({i8*, i32} (i8*, i8)* @prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -23,30 +31,26 @@ cleanup:
unreachable
}

; CHECK-LABEL: define { i8*, i32 } @f(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i8)* @f.resume.0 to i8*), i32 undef }, i32 %n, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK-NEXT: }

; CHECK-LABEL: define internal { i8*, i32 } @f.resume.0(i8* {{.*}} %0, i8 zeroext %1)
; CHECK-NEXT: :
; CHECK-NEXT: [[T0:%.*]] = icmp eq i8 %1, 0
; CHECK-NEXT: br i1 [[T0]],
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
; CHECK-NEXT: [[RET:%.*]] = insertvalue { i8*, i32 } { i8* bitcast ({ i8*, i32 } (i8*, i8)* @f.resume.0 to i8*), i32 undef }, i32 %inc, 1
; CHECK-NEXT: ret { i8*, i32 } [[RET]]
; CHECK: :
; CHECK-NEXT: ret { i8*, i32 } { i8* null, i32 undef }
; CHECK-NEXT: }

define i32 @main() {
; CHECK-LABEL: @main(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca [8 x i8], align 4
; CHECK-NEXT: [[DOTSUB:%.*]] = getelementptr inbounds [8 x i8], [8 x i8]* [[TMP0]], i64 0, i64 0
; CHECK-NEXT: [[N_VAL_SPILL_ADDR_I:%.*]] = bitcast [8 x i8]* [[TMP0]] to i32*
; CHECK-NEXT: store i32 4, i32* [[N_VAL_SPILL_ADDR_I]], align 4
; CHECK-NEXT: call void @print(i32 4)
; CHECK-NEXT: [[N_VAL_RELOAD_I:%.*]] = load i32, i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !0
; CHECK-NEXT: [[INC_I:%.*]] = add i32 [[N_VAL_RELOAD_I]], 1
; CHECK-NEXT: store i32 [[INC_I]], i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !0
; CHECK-NEXT: call void @print(i32 [[INC_I]])
; CHECK-NEXT: [[N_VAL_RELOAD_I1:%.*]] = load i32, i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !3
; CHECK-NEXT: [[INC_I2:%.*]] = add i32 [[N_VAL_RELOAD_I1]], 1
; CHECK-NEXT: store i32 [[INC_I2]], i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !3
; CHECK-NEXT: call void @print(i32 [[INC_I2]])
; CHECK-NEXT: ret i32 0
;
entry:
%0 = alloca [8 x i8], align 4
%buffer = bitcast [8 x i8]* %0 to i8*
@@ -73,21 +77,6 @@ entry:

; Unfortunately, we don't seem to fully optimize this right now due
; to some sort of phase-ordering thing.
; CHECK-LABEL: define i32 @main
; CHECK-NEXT: entry:
; CHECK: [[BUFFER:%.*]] = alloca [8 x i8], align 4
; CHECK: [[SLOT:%.*]] = bitcast [8 x i8]* [[BUFFER]] to i32*
; CHECK-NEXT: store i32 4, i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 4)
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[SLOT]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[INC]], i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 [[INC]])
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[SLOT]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[INC]], i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 [[INC]])
; CHECK-NEXT: ret i32 0

declare token @llvm.coro.id.retcon(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)
@@ -1,8 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; First example from Doc/Coroutines.rst (two block loop) converted to retcon
; RUN: opt < %s -enable-coroutines -O2 -S -enable-new-pm=0 | FileCheck %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck %s
; RUN: opt < %s -enable-coroutines -O2 -S -enable-new-pm=0 | FileCheck --check-prefixes=ALL,OLDPM %s
; RUN: opt < %s -enable-coroutines -passes='default<O2>' -S | FileCheck --check-prefixes=ALL,NEWPM %s

define i8* @f(i8* %buffer, i32 %n) {
; ALL-LABEL: @f(
; ALL-NEXT: entry:
; ALL-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; ALL-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; ALL-NEXT: tail call void @print(i32 [[N]])
; ALL-NEXT: ret i8* bitcast (i8* (i8*, i1)* @f.resume.0 to i8*)
;
entry:
%id = call token @llvm.coro.id.retcon(i32 8, i32 4, i8* %buffer, i8* bitcast (i8* (i8*, i1)* @prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -23,29 +31,27 @@ cleanup:
unreachable
}

; CHECK-LABEL: define i8* @f(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %n)
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1)* @f.resume.0 to i8*)
; CHECK-NEXT: }

; CHECK-LABEL: define internal i8* @f.resume.0(i8* {{.*}} %0, i1 zeroext %1)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %inc)
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1)* @f.resume.0 to i8*)
; CHECK: :
; CHECK-NEXT: ret i8* null
; CHECK-NEXT: }

define i32 @main() {
; ALL-LABEL: @main(
; ALL-NEXT: entry:
; ALL-NEXT: [[TMP0:%.*]] = alloca [8 x i8], align 4
; ALL-NEXT: [[DOTSUB:%.*]] = getelementptr inbounds [8 x i8], [8 x i8]* [[TMP0]], i64 0, i64 0
; ALL-NEXT: [[N_VAL_SPILL_ADDR_I:%.*]] = bitcast [8 x i8]* [[TMP0]] to i32*
; ALL-NEXT: store i32 4, i32* [[N_VAL_SPILL_ADDR_I]], align 4
; ALL-NEXT: call void @print(i32 4)
; ALL-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
; ALL-NEXT: [[N_VAL_RELOAD_I:%.*]] = load i32, i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !0
; ALL-NEXT: [[INC_I:%.*]] = add i32 [[N_VAL_RELOAD_I]], 1
; ALL-NEXT: store i32 [[INC_I]], i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !0
; ALL-NEXT: call void @print(i32 [[INC_I]]), !noalias !0
; ALL-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
; ALL-NEXT: [[N_VAL_RELOAD_I1:%.*]] = load i32, i32* [[N_VAL_SPILL_ADDR_I]], align 4, !alias.scope !3
; ALL-NEXT: [[INC_I2:%.*]] = add i32 [[N_VAL_RELOAD_I1]], 1
; ALL-NEXT: call void @print(i32 [[INC_I2]]), !noalias !3
; ALL-NEXT: ret i32 0
;
entry:
%0 = alloca [8 x i8], align 4
%buffer = bitcast [8 x i8]* %0 to i8*
@@ -63,24 +69,30 @@ entry:

; Unfortunately, we don't seem to fully optimize this right now due
; to some sort of phase-ordering thing.
; CHECK-LABEL: define i32 @main
; CHECK-NEXT: entry:
; CHECK: [[BUFFER:%.*]] = alloca [8 x i8], align 4
; CHECK: [[SLOT:%.*]] = bitcast [8 x i8]* [[BUFFER]] to i32*
; CHECK-NEXT: store i32 4, i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 4)
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[SLOT]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[INC]], i32* [[SLOT]], align 4
; CHECK-NEXT: call void @print(i32 [[INC]])
; CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[SLOT]], align 4
; CHECK-NEXT: [[INC:%.*]] = add i32 [[LOAD]], 1
; CHECK-NEXT: call void @print(i32 [[INC]])
; CHECK-NEXT: ret i32 0

define hidden { i8*, i8* } @g(i8* %buffer, i16* %ptr) {
; OLDPM-LABEL: @g(
; OLDPM-NEXT: entry:
; OLDPM-NEXT: [[TMP0:%.*]] = tail call i8* @allocate(i32 8) #[[ATTR0:[0-9]+]]
; OLDPM-NEXT: [[TMP1:%.*]] = bitcast i8* [[BUFFER:%.*]] to i8**
; OLDPM-NEXT: store i8* [[TMP0]], i8** [[TMP1]], align 8
; OLDPM-NEXT: [[PTR_SPILL_ADDR:%.*]] = bitcast i8* [[TMP0]] to i16**
; OLDPM-NEXT: store i16* [[PTR:%.*]], i16** [[PTR_SPILL_ADDR]], align 8
; OLDPM-NEXT: [[TMP2:%.*]] = bitcast i16* [[PTR]] to i8*
; OLDPM-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i8* } { i8* bitcast ({ i8*, i8* } (i8*, i1)* @g.resume.0 to i8*), i8* undef }, i8* [[TMP2]], 1
; OLDPM-NEXT: ret { i8*, i8* } [[TMP3]]
;
; NEWPM-LABEL: @g(
; NEWPM-NEXT: entry:
; NEWPM-NEXT: [[TMP0:%.*]] = tail call i8* @allocate(i32 8)
; NEWPM-NEXT: [[TMP1:%.*]] = bitcast i8* [[BUFFER:%.*]] to i8**
; NEWPM-NEXT: store i8* [[TMP0]], i8** [[TMP1]], align 8
; NEWPM-NEXT: [[PTR_SPILL_ADDR:%.*]] = bitcast i8* [[TMP0]] to i16**
; NEWPM-NEXT: store i16* [[PTR:%.*]], i16** [[PTR_SPILL_ADDR]], align 8
; NEWPM-NEXT: [[TMP2:%.*]] = bitcast i16* [[PTR]] to i8*
; NEWPM-NEXT: [[TMP3:%.*]] = insertvalue { i8*, i8* } { i8* bitcast ({ i8*, i8* } (i8*, i1)* @g.resume.0 to i8*), i8* undef }, i8* [[TMP2]], 1
; NEWPM-NEXT: ret { i8*, i8* } [[TMP3]]
;
entry:
%id = call token @llvm.coro.id.retcon(i32 8, i32 4, i8* %buffer, i8* bitcast ({ i8*, i8* } (i8*, i1)* @g_prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -1,7 +1,20 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck %s
target datalayout = "E-p:32:32"

define i8* @f(i8* %buffer, i32 %n, i8** swifterror %errorslot) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: tail call void @print(i32 [[N]])
; CHECK-NEXT: store i8* null, i8** [[ERRORSLOT:%.*]], align 4
; CHECK-NEXT: tail call void @maybeThrow(i8** nonnull swifterror [[ERRORSLOT]])
; CHECK-NEXT: [[TMP0:%.*]] = load i8*, i8** [[ERRORSLOT]], align 4
; CHECK-NEXT: tail call void @logError(i8* [[TMP0]])
; CHECK-NEXT: store i8* [[TMP0]], i8** [[ERRORSLOT]], align 4
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1, i8**)* @f.resume.0 to i8*)
;
entry:
%id = call token @llvm.coro.id.retcon(i32 8, i32 4, i8* %buffer, i8* bitcast (i8* (i8*, i1, i8**)* @f_prototype to i8*), i8* bitcast (i8* (i32)* @allocate to i8*), i8* bitcast (void (i8*)* @deallocate to i8*))
%hdl = call i8* @llvm.coro.begin(token %id, i8* null)
@@ -26,41 +39,25 @@ cleanup:
unreachable
}

; CHECK-LABEL: define i8* @f(i8* %buffer, i32 %n, i8** swifterror %errorslot)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %n)
; TODO: figure out a way to eliminate this
; CHECK-NEXT: store i8* null, i8** %errorslot
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %errorslot)
; CHECK-NEXT: [[T1:%.*]] = load i8*, i8** %errorslot
; CHECK-NEXT: call void @logError(i8* [[T1]])
; CHECK-NEXT: store i8* [[T1]], i8** %errorslot
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1, i8**)* @f.resume.0 to i8*)
; CHECK-NEXT: }

; CHECK-LABEL: define internal i8* @f.resume.0(i8* {{.*}} %0, i1 zeroext %1, i8** swifterror %2)
; CHECK-NEXT: :
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[ERROR:%.*]] = load i8*, i8** %2, align 4
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %inc)
; CHECK-NEXT: store i8* [[ERROR]], i8** %2
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %2)
; CHECK-NEXT: [[T2:%.*]] = load i8*, i8** %2
; CHECK-NEXT: call void @logError(i8* [[T2]])
; CHECK-NEXT: store i8* [[T2]], i8** %2
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1, i8**)* @f.resume.0 to i8*)
; CHECK: :
; CHECK-NEXT: ret i8* null
; CHECK-NEXT: }

define i8* @g(i8* %buffer, i32 %n) {
; CHECK-LABEL: @g(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = alloca swifterror i8*, align 4
; CHECK-NEXT: [[N_VAL_SPILL_ADDR:%.*]] = bitcast i8* [[BUFFER:%.*]] to i32*
; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_VAL_SPILL_ADDR]], align 4
; CHECK-NEXT: tail call void @print(i32 [[N]])
; CHECK-NEXT: store i8* null, i8** [[TMP0]], align 4
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror [[TMP0]])
; CHECK-NEXT: [[TMP1:%.*]] = load i8*, i8** [[TMP0]], align 4
; CHECK-NEXT: [[DOTSPILL_ADDR:%.*]] = getelementptr inbounds i8, i8* [[BUFFER]], i32 4
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[DOTSPILL_ADDR]] to i8**
; CHECK-NEXT: store i8* [[TMP1]], i8** [[TMP2]], align 4
; CHECK-NEXT: call void @logError(i8* [[TMP1]])
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1)* @g.resume.0 to i8*)
;
entry:
%errorslot = alloca swifterror i8*, align 4
store i8* null, i8** %errorslot
@@ -86,44 +83,7 @@ cleanup:
unreachable
}

; CHECK-LABEL: define i8* @g(i8* %buffer, i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ERRORSLOT:%.*]] = alloca swifterror i8*, align 4
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %buffer to i32*
; CHECK-NEXT: store i32 %n, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %n)
; CHECK-NEXT: store i8* null, i8** [[ERRORSLOT]], align 4
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror [[ERRORSLOT]])
; CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[ERRORSLOT]], align 4
; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* %buffer, i32 4
; CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8**
; CHECK-NEXT: store i8* [[T1]], i8** [[T3]], align 4
; CHECK-NEXT: call void @logError(i8* [[T1]])
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1)* @g.resume.0 to i8*)
; CHECK-NEXT: }

; CHECK-LABEL: define internal i8* @g.resume.0(i8* {{.*}} %0, i1 zeroext %1)
; CHECK-NEXT: :
; CHECK-NEXT: [[ERRORSLOT:%.*]] = alloca swifterror i8*, align 4
; CHECK-NEXT: br i1 %1,
; CHECK: :
; CHECK-NEXT: [[T0:%.*]] = bitcast i8* %0 to i32*
; CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[T0]], align 4
; CHECK-NEXT: %inc = add i32 [[T1]], 1
; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8, i8* %0, i32 4
; CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8**
; CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T3]]
; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
; CHECK-NEXT: call void @print(i32 %inc)
; CHECK-NEXT: store i8* [[T4]], i8** [[ERRORSLOT]]
; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror [[ERRORSLOT]])
; CHECK-NEXT: [[T5:%.*]] = load i8*, i8** [[ERRORSLOT]]
; CHECK-NEXT: store i8* [[T5]], i8** [[T3]], align 4
; CHECK-NEXT: call void @logError(i8* [[T5]])
; CHECK-NEXT: ret i8* bitcast (i8* (i8*, i1)* @g.resume.0 to i8*)
; CHECK: :
; CHECK-NEXT: ret i8* null
; CHECK-NEXT: }

declare token @llvm.coro.id.retcon(i32, i32, i8*, i8*, i8*, i8*)
declare i8* @llvm.coro.begin(token, i8*)