Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 04:32:44 +01:00), commit 3f23d4b8c3.
tryLatency compares two sched candidates. For the top zone it prefers the one with lesser depth, but only if that depth is greater than the total latency of the instructions we've already scheduled -- otherwise its latency would be hidden and there would be no stall. Unfortunately it only tests the depth of one of the candidates. This can lead to situations where the TopDepthReduce heuristic does not kick in, but a lower priority heuristic chooses the other candidate, whose depth *is* greater than the already scheduled latency, which causes a stall. The fix is to apply the heuristic if the depth of *either* candidate is greater than the already scheduled latency. All this also applies to the BotHeightReduce heuristic in the bottom zone. Differential Revision: https://reviews.llvm.org/D72392
126 lines
4.2 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr9 < %s | FileCheck %s
; Exercises the TopDepthReduce/BotHeightReduce scheduler heuristic fix
; (D72392): the deep-latency vector loads (lxvx) and their mfvsrld/mffprd
; extracts must be scheduled early enough that neither candidate's depth
; exceeds the already-scheduled latency, avoiding a stall before the
; multiply chain.
define dso_local i64 @test1(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li 5, -13
; CHECK-NEXT:    li 6, 7
; CHECK-NEXT:    li 7, 11
; CHECK-NEXT:    li 8, 15
; CHECK-NEXT:    lxvx 0, 3, 5
; CHECK-NEXT:    li 5, 19
; CHECK-NEXT:    ldx 6, 3, 6
; CHECK-NEXT:    ldx 7, 3, 7
; CHECK-NEXT:    lxvx 1, 3, 5
; CHECK-NEXT:    li 5, 3
; CHECK-NEXT:    ldx 5, 3, 5
; CHECK-NEXT:    ldx 3, 3, 8
; CHECK-NEXT:    mfvsrld 9, 0
; CHECK-NEXT:    mffprd 8, 0
; CHECK-NEXT:    mfvsrld 10, 1
; CHECK-NEXT:    mffprd 11, 1
; CHECK-NEXT:    mulld 8, 9, 8
; CHECK-NEXT:    mulld 5, 8, 5
; CHECK-NEXT:    mulld 5, 5, 10
; CHECK-NEXT:    mulld 5, 5, 11
; CHECK-NEXT:    mulld 5, 5, 6
; CHECK-NEXT:    mulld 5, 5, 7
; CHECK-NEXT:    maddld 3, 5, 3, 4
; CHECK-NEXT:    blr
entry:
  ; Two overlapping <2 x i64> loads plus four scalar loads at small offsets.
  %add.ptr = getelementptr inbounds i8, i8* %p, i64 -13
  %0 = bitcast i8* %add.ptr to <2 x i64>*
  %1 = load <2 x i64>, <2 x i64>* %0, align 16
  %add.ptr1 = getelementptr inbounds i8, i8* %p, i64 19
  %2 = bitcast i8* %add.ptr1 to <2 x i64>*
  %3 = load <2 x i64>, <2 x i64>* %2, align 16
  %add.ptr3 = getelementptr inbounds i8, i8* %p, i64 3
  %4 = bitcast i8* %add.ptr3 to i64*
  %5 = load i64, i64* %4, align 8
  %add.ptr5 = getelementptr inbounds i8, i8* %p, i64 7
  %6 = bitcast i8* %add.ptr5 to i64*
  %7 = load i64, i64* %6, align 8
  %add.ptr7 = getelementptr inbounds i8, i8* %p, i64 11
  %8 = bitcast i8* %add.ptr7 to i64*
  %9 = load i64, i64* %8, align 8
  %add.ptr9 = getelementptr inbounds i8, i8* %p, i64 15
  %10 = bitcast i8* %add.ptr9 to i64*
  %11 = load i64, i64* %10, align 8
  ; Extract both lanes of each vector, then fold everything into one
  ; serial multiply chain ending in a multiply-add with %count.
  %vecext = extractelement <2 x i64> %1, i32 1
  %vecext13 = extractelement <2 x i64> %1, i32 0
  %vecext15 = extractelement <2 x i64> %3, i32 0
  %vecext17 = extractelement <2 x i64> %3, i32 1
  %mul = mul i64 %vecext13, %vecext
  %mul10 = mul i64 %mul, %5
  %mul11 = mul i64 %mul10, %vecext15
  %mul12 = mul i64 %mul11, %vecext17
  %mul14 = mul i64 %mul12, %7
  %mul16 = mul i64 %mul14, %9
  %mul18 = mul i64 %mul16, %11
  %conv = sext i32 %count to i64
  %add19 = add i64 %mul18, %conv
  ret i64 %add19
}
|
|
|
|
; Same scheduling scenario with large (non-16-bit) offsets: the li/ori
; materializations of 40001/40005/40009 feed indexed loads, and the loads
; must be hoisted above the multiply chain so no candidate's depth exceeds
; the already-scheduled latency.
define dso_local i64 @test2(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr #0 {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li 5, 0
; CHECK-NEXT:    ori 6, 5, 40009
; CHECK-NEXT:    ori 7, 5, 40001
; CHECK-NEXT:    ori 5, 5, 40005
; CHECK-NEXT:    ldx 6, 3, 6
; CHECK-NEXT:    ldx 7, 3, 7
; CHECK-NEXT:    ldx 3, 3, 5
; CHECK-NEXT:    mulld 5, 7, 6
; CHECK-NEXT:    maddld 3, 5, 3, 4
; CHECK-NEXT:    blr
entry:
  %add.ptr = getelementptr inbounds i8, i8* %p, i64 40009
  %0 = bitcast i8* %add.ptr to i64*
  %1 = load i64, i64* %0, align 8
  %add.ptr2 = getelementptr inbounds i8, i8* %p, i64 40001
  %2 = bitcast i8* %add.ptr2 to i64*
  %3 = load i64, i64* %2, align 8
  %add.ptr4 = getelementptr inbounds i8, i8* %p, i64 40005
  %4 = bitcast i8* %add.ptr4 to i64*
  %5 = load i64, i64* %4, align 8
  ; (p[40001] * p[40009]) * p[40005] + count
  %mul = mul i64 %3, %1
  %mul5 = mul i64 %mul, %5
  %conv = sext i32 %count to i64
  %add6 = add i64 %mul5, %conv
  ret i64 %add6
}
|
|
|
|
; Variant of test2 with offsets above 65536, so the bases are built with
; lis + ori (e.g. 80033 = 65536 + 14497). Same expectation: loads are
; scheduled ahead of the dependent multiply chain.
define dso_local i64 @test3(i8* nocapture readonly %p, i32 signext %count) local_unnamed_addr {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lis 5, 1
; CHECK-NEXT:    ori 6, 5, 14497
; CHECK-NEXT:    ori 7, 5, 14465
; CHECK-NEXT:    ori 5, 5, 14481
; CHECK-NEXT:    ldx 6, 3, 6
; CHECK-NEXT:    ldx 7, 3, 7
; CHECK-NEXT:    ldx 3, 3, 5
; CHECK-NEXT:    mulld 5, 7, 6
; CHECK-NEXT:    maddld 3, 5, 3, 4
; CHECK-NEXT:    blr
entry:
  %add.ptr = getelementptr inbounds i8, i8* %p, i64 80033
  %0 = bitcast i8* %add.ptr to i64*
  %1 = load i64, i64* %0, align 8
  %add.ptr2 = getelementptr inbounds i8, i8* %p, i64 80001
  %2 = bitcast i8* %add.ptr2 to i64*
  %3 = load i64, i64* %2, align 8
  %add.ptr4 = getelementptr inbounds i8, i8* %p, i64 80017
  %4 = bitcast i8* %add.ptr4 to i64*
  %5 = load i64, i64* %4, align 8
  ; (p[80001] * p[80033]) * p[80017] + count
  %mul = mul i64 %3, %1
  %mul5 = mul i64 %mul, %5
  %conv = sext i32 %count to i64
  %add6 = add i64 %mul5, %conv
  ret i64 %add6
}
|
|
|