
[NFC][InstCombine] Autogenerate a few tests being affected by an upcoming patch

Roman Lebedev 2020-10-03 22:46:50 +03:00
parent 9f25ff7107
commit 19460feaba
3 changed files with 290 additions and 121 deletions
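
The checks below were not edited by hand; they were regenerated with the script named in the NOTE line of each test. A typical invocation looks like the following sketch (the test path is a placeholder and the build directory is assumed; --opt-binary points the script at a locally built opt):

    python utils/update_test_checks.py --opt-binary=build/bin/opt path/to/test.ll

The script runs the test's RUN line through opt and rewrites the CHECK lines to match the actual output, so the later functional patch produces a minimal, reviewable diff.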

View File

@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S < %s -instcombine | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -6,9 +7,11 @@ target triple = "x86_64-apple-macosx10.7.0"
; Check transforms involving atomic operations
define i32 @test1(i32* %p) {
; CHECK-LABEL: define i32 @test1(
; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
; CHECK: shl i32 %x, 1
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p seq_cst, align 4
%y = load i32, i32* %p, align 4
%z = add i32 %x, %y
@@ -16,9 +19,12 @@ define i32 @test1(i32* %p) {
}
define i32 @test2(i32* %p) {
; CHECK-LABEL: define i32 @test2(
; CHECK: %x = load volatile i32, i32* %p, align 4
; CHECK: %y = load volatile i32, i32* %p, align 4
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[Y:%.*]] = load volatile i32, i32* [[P]], align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load volatile i32, i32* %p, align 4
%y = load volatile i32, i32* %p, align 4
%z = add i32 %x, %y
@@ -29,8 +35,11 @@ define i32 @test2(i32* %p) {
; memory location are a bit unclear, but conservatively, we know we don't
; want to remove the volatile.
define i32 @test3(i32* %p) {
; CHECK-LABEL: define i32 @test3(
; CHECK: %x = load volatile i32, i32* %p, align 4
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[X:%.*]] = load volatile i32, i32* [[P:%.*]], align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load volatile i32, i32* %p, align 4
%y = load i32, i32* %p, align 4
%z = add i32 %x, %y
@@ -39,21 +48,26 @@ define i32 @test3(i32* %p) {
; Forwarding from a stronger ordered atomic is fine
define i32 @test4(i32* %p) {
; CHECK-LABEL: define i32 @test4(
; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
; CHECK: shl i32 %x, 1
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p seq_cst, align 4
%y = load atomic i32, i32* %p unordered, align 4
%z = add i32 %x, %y
ret i32 %z
}
; Forwarding from a non-atomic is not. (The earlier load
; could in principle be promoted to atomic and then forwarded,
; but we can't just drop the atomic from the load.)
define i32 @test5(i32* %p) {
; CHECK-LABEL: define i32 @test5(
; CHECK: %x = load atomic i32, i32* %p unordered, align 4
; CHECK-LABEL: @test5(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p unordered, align 4
%y = load i32, i32* %p, align 4
%z = add i32 %x, %y
@@ -62,9 +76,11 @@ define i32 @test5(i32* %p) {
; Forwarding atomic to atomic is fine
define i32 @test6(i32* %p) {
; CHECK-LABEL: define i32 @test6(
; CHECK: %x = load atomic i32, i32* %p unordered, align 4
; CHECK: shl i32 %x, 1
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] unordered, align 4
; CHECK-NEXT: [[Z:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p unordered, align 4
%y = load atomic i32, i32* %p unordered, align 4
%z = add i32 %x, %y
@@ -73,9 +89,12 @@ define i32 @test6(i32* %p) {
; FIXME: we currently don't do anything for monotonic
define i32 @test7(i32* %p) {
; CHECK-LABEL: define i32 @test7(
; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
; CHECK: %y = load atomic i32, i32* %p monotonic, align 4
; CHECK-LABEL: @test7(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[P]] monotonic, align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p seq_cst, align 4
%y = load atomic i32, i32* %p monotonic, align 4
%z = add i32 %x, %y
@@ -84,9 +103,12 @@ define i32 @test7(i32* %p) {
; FIXME: We could forward in racy code
define i32 @test8(i32* %p) {
; CHECK-LABEL: define i32 @test8(
; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
; CHECK: %y = load atomic i32, i32* %p acquire, align 4
; CHECK-LABEL: @test8(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: [[Y:%.*]] = load atomic i32, i32* [[P]] acquire, align 4
; CHECK-NEXT: [[Z:%.*]] = add i32 [[X]], [[Y]]
; CHECK-NEXT: ret i32 [[Z]]
;
%x = load atomic i32, i32* %p seq_cst, align 4
%y = load atomic i32, i32* %p acquire, align 4
%z = add i32 %x, %y
@@ -96,45 +118,57 @@ define i32 @test8(i32* %p) {
; An unordered access to null is still unreachable. There's no
; ordering imposed.
define i32 @test9() {
; CHECK-LABEL: define i32 @test9(
; CHECK: store i32 undef, i32* null
; CHECK-LABEL: @test9(
; CHECK-NEXT: store i32 undef, i32* null, align 536870912
; CHECK-NEXT: ret i32 undef
;
%x = load atomic i32, i32* null unordered, align 4
ret i32 %x
}
define i32 @test9_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test9_no_null_opt(
; CHECK: load atomic i32, i32* null unordered
; CHECK-LABEL: @test9_no_null_opt(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null unordered, align 536870912
; CHECK-NEXT: ret i32 [[X]]
;
%x = load atomic i32, i32* null unordered, align 4
ret i32 %x
}
; FIXME: Could also fold
define i32 @test10() {
; CHECK-LABEL: define i32 @test10(
; CHECK: load atomic i32, i32* null monotonic
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null monotonic, align 536870912
; CHECK-NEXT: ret i32 [[X]]
;
%x = load atomic i32, i32* null monotonic, align 4
ret i32 %x
}
define i32 @test10_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test10_no_null_opt(
; CHECK: load atomic i32, i32* null monotonic
; CHECK-LABEL: @test10_no_null_opt(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null monotonic, align 536870912
; CHECK-NEXT: ret i32 [[X]]
;
%x = load atomic i32, i32* null monotonic, align 4
ret i32 %x
}
; Would this be legal to fold? Probably?
define i32 @test11() {
; CHECK-LABEL: define i32 @test11(
; CHECK: load atomic i32, i32* null seq_cst
; CHECK-LABEL: @test11(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null seq_cst, align 536870912
; CHECK-NEXT: ret i32 [[X]]
;
%x = load atomic i32, i32* null seq_cst, align 4
ret i32 %x
}
define i32 @test11_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test11_no_null_opt(
; CHECK: load atomic i32, i32* null seq_cst
; CHECK-LABEL: @test11_no_null_opt(
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* null seq_cst, align 536870912
; CHECK-NEXT: ret i32 [[X]]
;
%x = load atomic i32, i32* null seq_cst, align 4
ret i32 %x
}
@@ -142,45 +176,57 @@ define i32 @test11_no_null_opt() #0 {
; An unordered access to null is still unreachable. There's no
; ordering imposed.
define i32 @test12() {
; CHECK-LABEL: define i32 @test12(
; CHECK: store atomic i32 undef, i32* null
; CHECK-LABEL: @test12(
; CHECK-NEXT: store atomic i32 undef, i32* null unordered, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null unordered, align 4
ret i32 0
}
define i32 @test12_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test12_no_null_opt(
; CHECK: store atomic i32 0, i32* null unordered
; CHECK-LABEL: @test12_no_null_opt(
; CHECK-NEXT: store atomic i32 0, i32* null unordered, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null unordered, align 4
ret i32 0
}
; FIXME: Could also fold
define i32 @test13() {
; CHECK-LABEL: define i32 @test13(
; CHECK: store atomic i32 0, i32* null monotonic
; CHECK-LABEL: @test13(
; CHECK-NEXT: store atomic i32 0, i32* null monotonic, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null monotonic, align 4
ret i32 0
}
define i32 @test13_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test13_no_null_opt(
; CHECK: store atomic i32 0, i32* null monotonic
; CHECK-LABEL: @test13_no_null_opt(
; CHECK-NEXT: store atomic i32 0, i32* null monotonic, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null monotonic, align 4
ret i32 0
}
; Would this be legal to fold? Probably?
define i32 @test14() {
; CHECK-LABEL: define i32 @test14(
; CHECK: store atomic i32 0, i32* null seq_cst
; CHECK-LABEL: @test14(
; CHECK-NEXT: store atomic i32 0, i32* null seq_cst, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null seq_cst, align 4
ret i32 0
}
define i32 @test14_no_null_opt() #0 {
; CHECK-LABEL: define i32 @test14_no_null_opt(
; CHECK: store atomic i32 0, i32* null seq_cst
; CHECK-LABEL: @test14_no_null_opt(
; CHECK-NEXT: store atomic i32 0, i32* null seq_cst, align 536870912
; CHECK-NEXT: ret i32 0
;
store atomic i32 0, i32* null seq_cst, align 4
ret i32 0
}
@@ -189,9 +235,12 @@ define i32 @test14_no_null_opt() #0 {
@b = external global i32
define i32 @test15(i1 %cnd) {
; CHECK-LABEL: define i32 @test15(
; CHECK: load atomic i32, i32* @a unordered, align 4
; CHECK: load atomic i32, i32* @b unordered, align 4
; CHECK-LABEL: @test15(
; CHECK-NEXT: [[A_VAL:%.*]] = load atomic i32, i32* @a unordered, align 4
; CHECK-NEXT: [[B_VAL:%.*]] = load atomic i32, i32* @b unordered, align 4
; CHECK-NEXT: [[X:%.*]] = select i1 [[CND:%.*]], i32 [[A_VAL]], i32 [[B_VAL]]
; CHECK-NEXT: ret i32 [[X]]
;
%addr = select i1 %cnd, i32* @a, i32* @b
%x = load atomic i32, i32* %addr unordered, align 4
ret i32 %x
@@ -199,8 +248,11 @@ define i32 @test15(i1 %cnd) {
; FIXME: This would be legal to transform
define i32 @test16(i1 %cnd) {
; CHECK-LABEL: define i32 @test16(
; CHECK: load atomic i32, i32* %addr monotonic, align 4
; CHECK-LABEL: @test16(
; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], i32* @a, i32* @b
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[ADDR]] monotonic, align 4
; CHECK-NEXT: ret i32 [[X]]
;
%addr = select i1 %cnd, i32* @a, i32* @b
%x = load atomic i32, i32* %addr monotonic, align 4
ret i32 %x
@@ -208,17 +260,28 @@ define i32 @test16(i1 %cnd) {
; FIXME: This would be legal to transform
define i32 @test17(i1 %cnd) {
; CHECK-LABEL: define i32 @test17(
; CHECK: load atomic i32, i32* %addr seq_cst, align 4
; CHECK-LABEL: @test17(
; CHECK-NEXT: [[ADDR:%.*]] = select i1 [[CND:%.*]], i32* @a, i32* @b
; CHECK-NEXT: [[X:%.*]] = load atomic i32, i32* [[ADDR]] seq_cst, align 4
; CHECK-NEXT: ret i32 [[X]]
;
%addr = select i1 %cnd, i32* @a, i32* @b
%x = load atomic i32, i32* %addr seq_cst, align 4
ret i32 %x
}
define i32 @test22(i1 %cnd) {
; CHECK-LABEL: define i32 @test22(
; CHECK: [[PHI:%.*]] = phi i32
; CHECK: store atomic i32 [[PHI]], i32* @a unordered, align 4
; CHECK-LABEL: @test22(
; CHECK-NEXT: br i1 [[CND:%.*]], label [[BLOCK1:%.*]], label [[BLOCK2:%.*]]
; CHECK: block1:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: block2:
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i32 [ 2, [[BLOCK2]] ], [ 1, [[BLOCK1]] ]
; CHECK-NEXT: store atomic i32 [[STOREMERGE]], i32* @a unordered, align 4
; CHECK-NEXT: ret i32 0
;
br i1 %cnd, label %block1, label %block2
block1:
@@ -234,8 +297,17 @@ merge:
; TODO: probably also legal here
define i32 @test23(i1 %cnd) {
; CHECK-LABEL: define i32 @test23(
; CHECK: br i1 %cnd, label %block1, label %block2
; CHECK-LABEL: @test23(
; CHECK-NEXT: br i1 [[CND:%.*]], label [[BLOCK1:%.*]], label [[BLOCK2:%.*]]
; CHECK: block1:
; CHECK-NEXT: store atomic i32 1, i32* @a monotonic, align 4
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: block2:
; CHECK-NEXT: store atomic i32 2, i32* @a monotonic, align 4
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: ret i32 0
;
br i1 %cnd, label %block1, label %block2
block1:
@@ -252,9 +324,14 @@ merge:
declare void @clobber()
define i32 @test18(float* %p) {
; CHECK-LABEL: define i32 @test18(
; CHECK: load atomic i32, i32* [[A:%.*]] unordered, align 4
; CHECK: store atomic i32 [[B:%.*]], i32* [[C:%.*]] unordered, align 4
; CHECK-LABEL: @test18(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float* [[P:%.*]] to i32*
; CHECK-NEXT: [[X1:%.*]] = load atomic i32, i32* [[TMP1]] unordered, align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[P]] to i32*
; CHECK-NEXT: store atomic i32 [[X1]], i32* [[TMP2]] unordered, align 4
; CHECK-NEXT: ret i32 0
;
%x = load atomic float, float* %p unordered, align 4
call void @clobber() ;; keep the load around
store atomic float %x, float* %p unordered, align 4
@@ -263,9 +340,12 @@ define i32 @test18(float* %p) {
; TODO: probably also legal in this case
define i32 @test19(float* %p) {
; CHECK-LABEL: define i32 @test19(
; CHECK: load atomic float, float* %p seq_cst, align 4
; CHECK: store atomic float %x, float* %p seq_cst, align 4
; CHECK-LABEL: @test19(
; CHECK-NEXT: [[X:%.*]] = load atomic float, float* [[P:%.*]] seq_cst, align 4
; CHECK-NEXT: call void @clobber()
; CHECK-NEXT: store atomic float [[X]], float* [[P]] seq_cst, align 4
; CHECK-NEXT: ret i32 0
;
%x = load atomic float, float* %p seq_cst, align 4
call void @clobber() ;; keep the load around
store atomic float %x, float* %p seq_cst, align 4
@@ -273,57 +353,74 @@ define i32 @test19(float* %p) {
}
define i32 @test20(i32** %p, i8* %v) {
; CHECK-LABEL: define i32 @test20(
; CHECK: store atomic i8* %v, i8** [[D:%.*]] unordered, align 4
; CHECK-LABEL: @test20(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32** [[P:%.*]] to i8**
; CHECK-NEXT: store atomic i8* [[V:%.*]], i8** [[TMP1]] unordered, align 4
; CHECK-NEXT: ret i32 0
;
%cast = bitcast i8* %v to i32*
store atomic i32* %cast, i32** %p unordered, align 4
ret i32 0
}
define i32 @test21(i32** %p, i8* %v) {
; CHECK-LABEL: define i32 @test21(
; CHECK: store atomic i32* %cast, i32** %p monotonic, align 4
; CHECK-LABEL: @test21(
; CHECK-NEXT: [[CAST:%.*]] = bitcast i8* [[V:%.*]] to i32*
; CHECK-NEXT: store atomic i32* [[CAST]], i32** [[P:%.*]] monotonic, align 4
; CHECK-NEXT: ret i32 0
;
%cast = bitcast i8* %v to i32*
store atomic i32* %cast, i32** %p monotonic, align 4
ret i32 0
}
define void @pr27490a(i8** %p1, i8** %p2) {
; CHECK-LABEL: define void @pr27490
; CHECK: %1 = bitcast i8** %p1 to i64*
; CHECK: %l1 = load i64, i64* %1, align 8
; CHECK: %2 = bitcast i8** %p2 to i64*
; CHECK: store volatile i64 %l1, i64* %2, align 8
; CHECK-LABEL: @pr27490a(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8** [[P1:%.*]] to i64*
; CHECK-NEXT: [[L1:%.*]] = load i64, i64* [[TMP1]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8** [[P2:%.*]] to i64*
; CHECK-NEXT: store volatile i64 [[L1]], i64* [[TMP2]], align 8
; CHECK-NEXT: ret void
;
%l = load i8*, i8** %p1
store volatile i8* %l, i8** %p2
ret void
}
define void @pr27490b(i8** %p1, i8** %p2) {
; CHECK-LABEL: define void @pr27490
; CHECK: %1 = bitcast i8** %p1 to i64*
; CHECK: %l1 = load i64, i64* %1, align 8
; CHECK: %2 = bitcast i8** %p2 to i64*
; CHECK: store atomic i64 %l1, i64* %2 seq_cst, align 8
; CHECK-LABEL: @pr27490b(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8** [[P1:%.*]] to i64*
; CHECK-NEXT: [[L1:%.*]] = load i64, i64* [[TMP1]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8** [[P2:%.*]] to i64*
; CHECK-NEXT: store atomic i64 [[L1]], i64* [[TMP2]] seq_cst, align 8
; CHECK-NEXT: ret void
;
%l = load i8*, i8** %p1
store atomic i8* %l, i8** %p2 seq_cst, align 8
ret void
}
;; At the moment, we can't form atomic vectors by folding since these are
;; not representable in the IR. This was pr29121. The right long term
;; solution is to extend the IR to handle this case.
define <2 x float> @no_atomic_vector_load(i64* %p) {
; CHECK-LABEL: @no_atomic_vector_load
; CHECK: load atomic i64, i64* %p unordered, align 8
; CHECK-LABEL: @no_atomic_vector_load(
; CHECK-NEXT: [[LOAD:%.*]] = load atomic i64, i64* [[P:%.*]] unordered, align 8
; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast i64 [[LOAD]] to <2 x float>
; CHECK-NEXT: ret <2 x float> [[DOTCAST]]
;
%load = load atomic i64, i64* %p unordered, align 8
%.cast = bitcast i64 %load to <2 x float>
ret <2 x float> %.cast
}
define void @no_atomic_vector_store(<2 x float> %p, i8* %p2) {
; CHECK-LABEL: @no_atomic_vector_store
; CHECK: store atomic i64 %1, i64* %2 unordered, align 8
; CHECK-LABEL: @no_atomic_vector_store(
; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[P:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[P2:%.*]] to i64*
; CHECK-NEXT: store atomic i64 [[TMP1]], i64* [[TMP2]] unordered, align 8
; CHECK-NEXT: ret void
;
%1 = bitcast <2 x float> %p to i64
%2 = bitcast i8* %p2 to i64*
store atomic i64 %1, i64* %2 unordered, align 8
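
A note on the autogenerated idiom seen above: [[X:%.*]] is a FileCheck variable definition that captures whatever SSA value name opt actually prints, a later bare [[X]] must match the same text, and CHECK-NEXT pins each check to the line immediately after the previous match. A minimal illustration (hypothetical IR, not taken from this patch):

    ; CHECK-LABEL: @example(
    ; CHECK-NEXT: [[V:%.*]] = shl i32 [[A:%.*]], 1
    ; CHECK-NEXT: ret i32 [[V]]

Because the checks bind to value names positionally rather than literally, they keep passing when a later patch renames values, which is what makes regenerating them ahead of the upcoming patch an NFC change.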

View File

@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -5,7 +6,11 @@ target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128"
define i32 @test_load_cast_combine_tbaa(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves TBAA.
; CHECK-LABEL: @test_load_cast_combine_tbaa(
; CHECK: load i32, i32* %{{.*}}, !tbaa !0
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, [[TBAA0:!tbaa !.*]]
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
%l = load float, float* %ptr, !tbaa !0
%c = bitcast float %l to i32
@@ -15,7 +20,11 @@ entry:
define i32 @test_load_cast_combine_noalias(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves no-alias metadata.
; CHECK-LABEL: @test_load_cast_combine_noalias(
; CHECK: load i32, i32* %{{.*}}, !alias.scope !3, !noalias !4
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !alias.scope !3, !noalias !4
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
%l = load float, float* %ptr, !alias.scope !3, !noalias !4
%c = bitcast float %l to i32
@@ -27,9 +36,11 @@ define float @test_load_cast_combine_range(i32* %ptr) {
; would be nice to preserve or update it somehow but this is hard when moving
; between types.
; CHECK-LABEL: @test_load_cast_combine_range(
; CHECK: load float, float* %{{.*}}
; CHECK-NOT: !range
; CHECK: ret float
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[PTR:%.*]] to float*
; CHECK-NEXT: [[L1:%.*]] = load float, float* [[TMP0]], align 4
; CHECK-NEXT: ret float [[L1]]
;
entry:
%l = load i32, i32* %ptr, !range !5
%c = bitcast i32 %l to float
@@ -39,7 +50,11 @@ entry:
define i32 @test_load_cast_combine_invariant(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
; CHECK-LABEL: @test_load_cast_combine_invariant(
; CHECK: load i32, i32* %{{.*}}, !invariant.load !7
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !invariant.load !7
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
%l = load float, float* %ptr, !invariant.load !6
%c = bitcast float %l to i32
@@ -50,7 +65,11 @@ define i32 @test_load_cast_combine_nontemporal(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves nontemporal
; metadata.
; CHECK-LABEL: @test_load_cast_combine_nontemporal(
; CHECK: load i32, i32* %{{.*}}, !nontemporal !8
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR:%.*]] to i32*
; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP0]], align 4, !nontemporal !8
; CHECK-NEXT: ret i32 [[L1]]
;
entry:
%l = load float, float* %ptr, !nontemporal !7
%c = bitcast float %l to i32
@@ -61,7 +80,11 @@ define i8* @test_load_cast_combine_align(i32** %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves align
; metadata.
; CHECK-LABEL: @test_load_cast_combine_align(
; CHECK: load i8*, i8** %{{.*}}, !align !9
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !align !9
; CHECK-NEXT: ret i8* [[L1]]
;
entry:
%l = load i32*, i32** %ptr, !align !8
%c = bitcast i32* %l to i8*
@@ -72,7 +95,11 @@ define i8* @test_load_cast_combine_deref(i32** %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves dereferenceable
; metadata.
; CHECK-LABEL: @test_load_cast_combine_deref(
; CHECK: load i8*, i8** %{{.*}}, !dereferenceable !9
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !dereferenceable !9
; CHECK-NEXT: ret i8* [[L1]]
;
entry:
%l = load i32*, i32** %ptr, !dereferenceable !8
%c = bitcast i32* %l to i8*
@@ -83,7 +110,11 @@ define i8* @test_load_cast_combine_deref_or_null(i32** %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves
; dereferenceable_or_null metadata.
; CHECK-LABEL: @test_load_cast_combine_deref_or_null(
; CHECK: load i8*, i8** %{{.*}}, !dereferenceable_or_null !9
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32** [[PTR:%.*]] to i8**
; CHECK-NEXT: [[L1:%.*]] = load i8*, i8** [[TMP0]], align 8, !dereferenceable_or_null !9
; CHECK-NEXT: ret i8* [[L1]]
;
entry:
%l = load i32*, i32** %ptr, !dereferenceable_or_null !8
%c = bitcast i32* %l to i8*
@@ -94,7 +125,23 @@ define void @test_load_cast_combine_loop(float* %src, i32* %dst, i32 %n) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves loop access
; metadata.
; CHECK-LABEL: @test_load_cast_combine_loop(
; CHECK: load i32, i32* %{{.*}}, !llvm.access.group !6
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT: [[SRC_GEP:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[I]] to i64
; CHECK-NEXT: [[DST_GEP:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[SRC_GEP]] to i32*
; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[TMP2]], align 4, !llvm.access.group !6
; CHECK-NEXT: store i32 [[L1]], i32* [[DST_GEP]], align 4
; CHECK-NEXT: [[I_NEXT]] = add i32 [[I]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]], [[LOOP1:!llvm.loop !.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
br label %loop
@@ -126,9 +173,14 @@ define void @test_load_cast_combine_nonnull(float** %ptr) {
; file, and no LABEL lines are to be added after this point.
;
; CHECK-LABEL: @test_load_cast_combine_nonnull(
; CHECK: %[[V:.*]] = load i64, i64* %{{.*}}, !range ![[MD:[0-9]+]]
; CHECK-NOT: !nonnull
; CHECK: store i64 %[[V]], i64*
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float** [[PTR:%.*]] to i64*
; CHECK-NEXT: [[P1:%.*]] = load i64, i64* [[TMP0]], align 8, !range ![[MD:[0-9]+]]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr float*, float** [[PTR]], i64 42
; CHECK-NEXT: [[TMP1:%.*]] = bitcast float** [[GEP]] to i64*
; CHECK-NEXT: store i64 [[P1]], i64* [[TMP1]], align 8
; CHECK-NEXT: ret void
;
entry:
%p = load float*, float** %ptr, !nonnull !6
%gep = getelementptr float*, float** %ptr, i32 42

View File

@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4"
@@ -5,16 +6,17 @@ target triple = "x86_64-unknown-linux-gnu"
define i8 addrspace(4)* @f_0() {
; CHECK-LABEL: @f_0(
; CHECK: ret i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* null, i64 50)
; CHECK-NEXT: ret i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* null, i64 50)
;
%result = getelementptr i8, i8 addrspace(4)* null, i64 50
ret i8 addrspace(4)* %result
}
define i8 addrspace(3)* @f_1() {
; inttoptr is fine here since addrspace(3) is integral.
; CHECK-LABEL: @f_1(
; CHECK: ret i8 addrspace(3)* inttoptr (i64 50 to i8 addrspace(3)*)
; CHECK-NEXT: ret i8 addrspace(3)* inttoptr (i64 50 to i8 addrspace(3)*)
;
%result = getelementptr i8, i8 addrspace(3)* null, i64 50
ret i8 addrspace(3)* %result
}
@@ -22,13 +24,13 @@ define i8 addrspace(3)* @f_1() {
define void @f_2(i8 addrspace(4)** %ptr0, i8 addrspace(4)** %ptr1) {
; It is not okay to convert the load/store pair to load and store
; integers, since pointers in address space 4 are non-integral.
; CHECK-LABEL: @f_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[VAL:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[PTR0:%.*]], align 8
; CHECK-NEXT: store i8 addrspace(4)* [[VAL]], i8 addrspace(4)** [[PTR1:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
; CHECK: %val = load i8 addrspace(4)*, i8 addrspace(4)** %ptr0, align 8
; CHECK: store i8 addrspace(4)* %val, i8 addrspace(4)** %ptr1, align 8
; CHECK-NOT: load i64
; CHECK-NOT: store i64
%val = load i8 addrspace(4)*, i8 addrspace(4)** %ptr0
store i8 addrspace(4)* %val, i8 addrspace(4)** %ptr1
ret void
@@ -37,44 +39,60 @@ entry:
define void @f_3(i8 addrspace(3)** %ptr0, i8 addrspace(3)** %ptr1) {
; It *is* okay to convert the load/store pair to load and store
; integers, since pointers in address space 3 are integral.
; CHECK-LABEL: @f_3(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8 addrspace(3)** [[PTR0:%.*]] to i64*
; CHECK-NEXT: [[VAL1:%.*]] = load i64, i64* [[TMP0]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 addrspace(3)** [[PTR1:%.*]] to i64*
; CHECK-NEXT: store i64 [[VAL1]], i64* [[TMP1]], align 8
; CHECK-NEXT: ret void
;
entry:
; CHECK: load i64
; CHECK: store i64
%val = load i8 addrspace(3)*, i8 addrspace(3)** %ptr0
store i8 addrspace(3)* %val, i8 addrspace(3)** %ptr1
ret void
}
define i64 @g(i8 addrspace(4)** %gp) {
; CHECK-LABEL: @g(
; CHECK: load
; CHECK-NEXT: [[DOTPRE:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[GP:%.*]], align 8
; CHECK-NEXT: [[V74:%.*]] = call i8 addrspace(4)* @alloc()
; CHECK-NEXT: [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i8 addrspace(4)* addrspace(4)*
; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* addrspace(4)* [[TMP1]] to i8 addrspace(4)**
; CHECK-NEXT: store i8 addrspace(4)* [[DOTPRE]], i8 addrspace(4)** [[TMP2]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i64 addrspace(4)*
; CHECK-NEXT: [[V80:%.*]] = addrspacecast i64 addrspace(4)* [[TMP3]] to i64*
; CHECK-NEXT: [[V81:%.*]] = load i64, i64* [[V80]], align 8
; CHECK-NEXT: ret i64 [[V81]]
;
%.pre = load i8 addrspace(4)*, i8 addrspace(4)** %gp, align 8
%v74 = call i8 addrspace(4)* @alloc()
%v75 = addrspacecast i8 addrspace(4)* %v74 to i8*
%v76 = bitcast i8* %v75 to i8 addrspace(4)**
%v77 = getelementptr i8 addrspace(4)*, i8 addrspace(4)** %v76, i64 -1
; CHECK: store
store i8 addrspace(4)* %.pre, i8 addrspace(4)** %v77, align 8
%v80 = bitcast i8 addrspace(4)** %v77 to i64*
; CHECK: load
; CHECK-NOT: ptrtoint
%v81 = load i64, i64* %v80, align 8
ret i64 %v81
}
define i64 @g2(i8* addrspace(4)* %gp) {
; CHECK-LABEL: @g2(
; CHECK: load
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* addrspace(4)* [[GP:%.*]] to i64 addrspace(4)*
; CHECK-NEXT: [[DOTPRE1:%.*]] = load i64, i64 addrspace(4)* [[TMP1]], align 8
; CHECK-NEXT: [[V74:%.*]] = call i8 addrspace(4)* @alloc()
; CHECK-NEXT: [[V77:%.*]] = getelementptr i8, i8 addrspace(4)* [[V74]], i64 -8
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8 addrspace(4)* [[V77]] to i64 addrspace(4)*
; CHECK-NEXT: store i64 [[DOTPRE1]], i64 addrspace(4)* [[TMP2]], align 8
; CHECK-NEXT: ret i64 [[DOTPRE1]]
;
%.pre = load i8*, i8* addrspace(4)* %gp, align 8
%v74 = call i8 addrspace(4)* @alloc()
%v76 = bitcast i8 addrspace(4)* %v74 to i8* addrspace(4)*
%v77 = getelementptr i8*, i8* addrspace(4)* %v76, i64 -1
; CHECK: store
store i8* %.pre, i8* addrspace(4)* %v77, align 8
%v80 = bitcast i8* addrspace(4)* %v77 to i64 addrspace(4)*
; CHECK-NOT: store
%v81 = load i64, i64 addrspace(4)* %v80, align 8
ret i64 %v81
}
@@ -82,8 +100,10 @@ define i64 @g2(i8* addrspace(4)* %gp) {
declare i8 addrspace(4)* @alloc()
define i64 @f_4(i8 addrspace(4)* %v0) {
; CHECK-LABEL: @f_4(
; CHECK-NOT: ptrtoint
; CHECK-NEXT: [[V6:%.*]] = call i64 bitcast (i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*)(i8 addrspace(4)* [[V0:%.*]])
; CHECK-NEXT: ret i64 [[V6]]
;
%v5 = bitcast i64 (i64)* @f_5 to i64 (i8 addrspace(4)*)*
%v6 = call i64 %v5(i8 addrspace(4)* %v0)
ret i64 %v6