
[LoopUnroll] Allow unrolling if the unrolled size does not exceed loop size.

Summary:
In the following cases, unrolling can be beneficial even when
optimizing for code size:
 1) very low trip counts
 2) the potential to constant-fold most instructions after fully unrolling.

We can unroll in those cases by setting the unrolling threshold to the
loop size. This might highlight some cost-modeling issues, and fixing
them will have a positive impact in general.
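As a source-level illustration of the two cases (the functions below are hypothetical and not part of this patch; the second mirrors the shift/or pattern used in the new test):

// Case 1: a single-iteration loop; after full unrolling only the body remains.
void copy_first(int *dst, const int *src) {
  for (int i = 0; i < 1; ++i)
    dst[i] = src[i];
}

// Case 2: a low-trip-count loop whose stored value becomes a constant once the
// induction variable is known, so full unrolling leaves four plain stores.
void init_table(int *dst) {
  for (int i = 0; i < 4; ++i)
    dst[i] = (16 << (i * 8)) | (i * 8);
}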

Reviewers: vsk, efriedma, dmgreen, paquette

Reviewed By: paquette

Differential Revision: https://reviews.llvm.org/D60265

llvm-svn: 358586
Florian Hahn 2019-04-17 15:57:43 +00:00
parent 87c5f8ff0d
commit 08a6ae3810
2 changed files with 184 additions and 2 deletions


@@ -207,6 +207,7 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   if (OptForSize) {
     UP.Threshold = UP.OptSizeThreshold;
     UP.PartialThreshold = UP.PartialOptSizeThreshold;
+    UP.MaxPercentThresholdBoost = 100;
   }
 
   // Apply any user values specified by cl::opt
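Note on the hunk above: MaxPercentThresholdBoost caps how much the full-unroll threshold may be inflated when constant folding is expected to shrink the unrolled body; pinning it to 100 under optsize means no boosting, so only the plain threshold applies. A rough sketch of such a percentage cap (a simplification for illustration, not the pass's actual cost model):

#include <algorithm>
#include <cstdint>

// Simplified illustration: inflate the threshold by the expected savings, but
// never beyond MaxPercentThresholdBoost percent of the original threshold.
uint64_t boostedThreshold(uint64_t Threshold, unsigned PercentSavings,
                          unsigned MaxPercentThresholdBoost) {
  unsigned Boost = std::min(100u + PercentSavings, MaxPercentThresholdBoost);
  return Threshold * Boost / 100;
}
// With MaxPercentThresholdBoost == 100 the result is always Threshold itself.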
@@ -993,6 +994,7 @@ static LoopUnrollResult tryToUnrollLoop(
   if (OnlyWhenForced && !(TM & TM_Enable))
     return LoopUnrollResult::Unmodified;
 
+  bool OptForSize = L->getHeader()->getParent()->hasOptSize();
   unsigned NumInlineCandidates;
   bool NotDuplicatable;
   bool Convergent;
@@ -1000,8 +1002,11 @@ static LoopUnrollResult tryToUnrollLoop(
       L, SE, TTI, BFI, PSI, OptLevel, ProvidedThreshold, ProvidedCount,
       ProvidedAllowPartial, ProvidedRuntime, ProvidedUpperBound,
       ProvidedAllowPeeling);
-  // Exit early if unrolling is disabled.
-  if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0))
+  // Exit early if unrolling is disabled. For OptForSize, we pick the loop size
+  // as threshold later on.
+  if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0) &&
+      !OptForSize)
     return LoopUnrollResult::Unmodified;
 
   SmallPtrSet<const Value *, 32> EphValues;
@@ -1016,6 +1021,12 @@ static LoopUnrollResult tryToUnrollLoop(
                       << " instructions.\n");
     return LoopUnrollResult::Unmodified;
   }
+
+  // When optimizing for size, use LoopSize as threshold, to (fully) unroll
+  // loops, if it does not increase code size.
+  if (OptForSize)
+    UP.Threshold = std::max(UP.Threshold, LoopSize);
+
   if (NumInlineCandidates != 0) {
     LLVM_DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
     return LoopUnrollResult::Unmodified;
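Taken together, the changes above mean: under optsize the pass no longer gives up just because the opt-size threshold is 0; instead the loop's own size becomes the budget, so full unrolling is allowed exactly when the unrolled code is estimated to be no larger than the rolled loop. A minimal standalone sketch of that gating decision (illustrative types and names, not the LLVM implementation):

#include <algorithm>
#include <cstdint>

// Illustrative inputs; the real pass derives these from UnrollingPreferences
// and its instruction-count analysis of the loop.
struct SizeGateInputs {
  uint64_t Threshold;              // 0 means unrolling is disabled
  uint64_t LoopSize;               // estimated size of the rolled loop
  uint64_t EstimatedUnrolledSize;  // size after unrolling and folding
  bool OptForSize;
};

bool allowFullUnroll(const SizeGateInputs &In) {
  // Before this patch, Threshold == 0 under optsize meant an early exit.
  if (In.Threshold == 0 && !In.OptForSize)
    return false;
  uint64_t Budget =
      In.OptForSize ? std::max(In.Threshold, In.LoopSize) : In.Threshold;
  // Unroll only if it is not expected to grow the code.
  return In.EstimatedUnrolledSize <= Budget;
}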


@@ -0,0 +1,171 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-unroll -mtriple=arm64-apple-iphoneos -S %s | FileCheck %s
; Check we unroll even with optsize, if the result is smaller, either because
; we have single iteration loops or bodies with constant folding opportunities
; after fully unrolling.

declare i32 @get()

define void @fully_unrolled_single_iteration(i32* %src) #0 {
; CHECK-LABEL: @fully_unrolled_single_iteration(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[SRC:%.*]]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT: store i32 [[V]], i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [4 x i32], align 4
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
%v = load i32, i32* %src.idx
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
store i32 %v, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
%ptr = bitcast [4 x i32]* %arr to i32*
call void @use(i32* nonnull %ptr) #4
ret void
}

define void @fully_unrolled_smaller() #0 {
; CHECK-LABEL: @fully_unrolled_smaller(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [4 x i32], align 4
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%indvars.iv.tr = trunc i64 %indvars.iv to i32
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
store i32 %or, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
%ptr = bitcast [4 x i32]* %arr to i32*
call void @use(i32* nonnull %ptr) #4
ret void
}

define void @fully_unrolled_smaller_Oz() #1 {
; CHECK-LABEL: @fully_unrolled_smaller_Oz(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [4 x i32], align 4
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%indvars.iv.tr = trunc i64 %indvars.iv to i32
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
store i32 %or, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 3
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
%ptr = bitcast [4 x i32]* %arr to i32*
call void @use(i32* nonnull %ptr) #4
ret void
}

define void @fully_unrolled_bigger() #0 {
; CHECK-LABEL: @fully_unrolled_bigger(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[INDVARS_IV_TR:%.*]] = trunc i64 [[INDVARS_IV]] to i32
; CHECK-NEXT: [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 6
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
; CHECK-NEXT: ret void
;
entry:
%arr = alloca [4 x i32], align 4
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%indvars.iv.tr = trunc i64 %indvars.iv to i32
%shl.0 = shl i32 %indvars.iv.tr, 3
%shl.1 = shl i32 16, %shl.0
%or = or i32 %shl.1, %shl.0
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
store i32 %or, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 6
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup: ; preds = %for.cond
%ptr = bitcast [4 x i32]* %arr to i32*
call void @use(i32* nonnull %ptr) #4
ret void
}

declare void @use(i32*)

attributes #0 = { optsize }
attributes #1 = { minsize optsize }
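For reference, @fully_unrolled_smaller and @fully_unrolled_smaller_Oz roughly correspond to the C++ below (a hand-written approximation, not the source the IR was generated from). Every stored value constant-folds once the induction variable is known: 16, 4104, 1048592 and 268435480, matching the CHECK lines above, which is why the fully unrolled form is no larger than the rolled loop.

extern void use(int *);

// Approximate source form of @fully_unrolled_smaller: stores a shifted 16
// or'd with the shift amount; all four values are compile-time constants.
void fully_unrolled_smaller() {
  int arr[4];
  for (int i = 0; i < 4; ++i)
    arr[i] = (16 << (i * 8)) | (i * 8);
  use(arr);
}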