From cc3b86bd5cd3cefa8d0cf8c99ddef25ad89aeb2a Mon Sep 17 00:00:00 2001
From: Roman Lebedev
Date: Fri, 2 Jul 2021 23:14:21 +0300
Subject: [PATCH] [NFC][Codegen] Tune a few tests to not end with a naked
 `unreachable` terminator

These rely on the fact that currently simplifycfg won't really propagate
said `unreachable`, but that is about to change.
---
 test/CodeGen/AArch64/pr33172.ll         |  2 +-
 test/CodeGen/ARM/Windows/memset.ll      |  2 +-
 test/CodeGen/ARM/machine-cse-cmp.ll     |  3 +++
 test/CodeGen/ARM/memfunc.ll             | 18 +++++++++---------
 .../Hexagon/branchfolder-keep-impdef.ll |  2 +-
 test/CodeGen/Hexagon/reg-scavengebug.ll |  4 ++--
 6 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/test/CodeGen/AArch64/pr33172.ll b/test/CodeGen/AArch64/pr33172.ll
index 098d5358b02..e1b4cdc6603 100644
--- a/test/CodeGen/AArch64/pr33172.ll
+++ b/test/CodeGen/AArch64/pr33172.ll
@@ -22,7 +22,7 @@ entry:
   store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8
   store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8
   tail call void @llvm.memset.p0i8.i64(i8* align 8 bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i1 false) #2
-  unreachable
+  ret void
 }
 
 ; Function Attrs: argmemonly nounwind
diff --git a/test/CodeGen/ARM/Windows/memset.ll b/test/CodeGen/ARM/Windows/memset.ll
index 8cb257c1566..d4d918a29c1 100644
--- a/test/CodeGen/ARM/Windows/memset.ll
+++ b/test/CodeGen/ARM/Windows/memset.ll
@@ -7,7 +7,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
 define void @function() {
 entry:
   call void @llvm.memset.p0i8.i32(i8* bitcast ([512 x i8]* @source to i8*), i8 0, i32 512, i1 false)
-  unreachable
+  ret void
 }
 
 ; CHECK: movs r1, #0
diff --git a/test/CodeGen/ARM/machine-cse-cmp.ll b/test/CodeGen/ARM/machine-cse-cmp.ll
index 49dbb03135f..ab5f58c27e7 100644
--- a/test/CodeGen/ARM/machine-cse-cmp.ll
+++ b/test/CodeGen/ARM/machine-cse-cmp.ll
@@ -1,6 +1,8 @@
 ; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
 ;rdar://8003725
 
+declare void @llvm.trap()
+
 @G1 = external global i32
 @G2 = external global i32
 
@@ -38,6 +40,7 @@ for.body.lr.ph:                                   ; preds = %entry
   %1 = icmp sgt i32 %0, 1
   %smax = select i1 %1, i32 %0, i32 1
   call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([250 x i8], [250 x i8]* @bar, i32 0, i32 0), i8 0, i32 %smax, i1 false)
+  call void @llvm.trap()
   unreachable
 
 for.cond1.preheader:                              ; preds = %entry
diff --git a/test/CodeGen/ARM/memfunc.ll b/test/CodeGen/ARM/memfunc.ll
index 0fe1f630c57..217b88a32de 100644
--- a/test/CodeGen/ARM/memfunc.ll
+++ b/test/CodeGen/ARM/memfunc.ll
@@ -94,7 +94,7 @@ entry:
   ; CHECK-GNUEABI: bl memset
   call void @llvm.memset.p0i8.i32(i8* align 8 %dest, i8 0, i32 500, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments to memory intrinsics are automatically aligned if at least 8 bytes in size
@@ -140,7 +140,7 @@ entry:
   %2 = bitcast [9 x i8]* %arr2 to i8*
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if less than 8 bytes in size
@@ -179,7 +179,7 @@ entry:
   %2 = bitcast [7 x i8]* %arr2 to i8*
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if size+offset is less than 8 bytes
@@ -218,7 +218,7 @@ entry:
   %2 = getelementptr inbounds [9 x i8], [9 x i8]* %arr2, i32 0, i32 4
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the offset is not a multiple of 4
@@ -257,7 +257,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 1
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the offset is unknown
@@ -296,7 +296,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 %i
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the GEP is not inbounds
@@ -335,7 +335,7 @@ entry:
   %2 = getelementptr [13 x i8], [13 x i8]* %arr2, i32 0, i32 4
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned when the offset is past the end of the allocation
@@ -374,7 +374,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 16
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that global variables are aligned if they are large enough, but only if
@@ -401,7 +401,7 @@ entry:
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr8, i32 0, i32 0), i32 %n, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr9, i32 0, i32 0), i32 %n, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr10, i32 0, i32 0), i32 %n, i1 false)
-  unreachable
+  ret void
 }
 
 ; CHECK: {{\.data|\.section.+data}}
diff --git a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index 777952724ff..db56e0a2faf 100644
--- a/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -19,7 +19,7 @@ b2:                                               ; preds = %b1, %b0
   %t1 = phi i8* [ %t0, %b1 ], [ undef, %b0 ]
   %t2 = getelementptr inbounds i8, i8* %t1, i32 %p0
   tail call void @llvm.memmove.p0i8.p0i8.i32(i8* undef, i8* %t2, i32 undef, i1 false) #1
-  unreachable
+  ret void
 }
 
 declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0
diff --git a/test/CodeGen/Hexagon/reg-scavengebug.ll b/test/CodeGen/Hexagon/reg-scavengebug.ll
index d53799bc4d1..b712d1556ce 100644
--- a/test/CodeGen/Hexagon/reg-scavengebug.ll
+++ b/test/CodeGen/Hexagon/reg-scavengebug.ll
@@ -155,10 +155,10 @@ b2:                                               ; preds = %b1
   %v120 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 6
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> undef)
   store <16 x i32> %v121, <16 x i32>* %v120, align 64, !tbaa !0
-  unreachable
+  ret void
 
 b3:                                               ; preds = %b1
-  unreachable
+  ret void
 
 b4:                                               ; preds = %b0
   ret void
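
Background sketch, for illustration only: the function names (@fragile, @tuned)
and the i8* %dest parameter below are hypothetical and are not taken from the
files changed above; they only show the shape this patch is moving away from.
When a memory intrinsic call is immediately followed by a naked `unreachable`,
the optimizer is allowed to assume that point is never reached, so a simplifycfg
that starts propagating the `unreachable` backwards may delete the call and
leave the test's CHECK lines with nothing to match. Ending the function with
`ret void` (or, where the `unreachable` must stay, placing a
`call void @llvm.trap()` in front of it, as machine-cse-cmp.ll now does) keeps
the call on a path the test can still observe.

  ; Fragile shape: the whole body sits on a path that ends in `unreachable`,
  ; so the memset may be treated as dead and dropped before it is checked.
  define void @fragile(i8* %dest) {
  entry:
    call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 512, i1 false)
    unreachable
  }

  ; Tuned shape used throughout this patch: the function returns normally,
  ; so the memset stays live and its lowering remains checkable.
  define void @tuned(i8* %dest) {
  entry:
    call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 512, i1 false)
    ret void
  }

  declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)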