; Test that loops with sufficient registers do not spill or reload to the
; stack. These cases include calls, so the GR128 / FP128 registers must be
; part of the callee-saved register list in order to avoid spilling /
; reloading around them.
;
; RUN: llc -switch-peel-threshold=101 < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

%0 = type { %0*, %0*, %0*, i32, %1*, i64, i64, i64, i64, i64, i64, %2, %5, %7 }
%1 = type { i32, i32, i32 (%1*, i64, i32)*, i32 (%1*, i64, i64, i32, i8**)*, i32 (%1*, i64, i64, i64, i32)*, i32 (%1*)*, void (i8*)*, i8*, i8* }
%2 = type { i64, i64, %3** }
%3 = type { %4*, i64 }
%4 = type { i64, i8* }
%5 = type { i64, i64, %6** }
%6 = type { i64, %4*, i32, i64, i8* }
%7 = type { i64, i64, %8** }
%8 = type { i64, i64*, i64*, %4*, i64, i32*, %5, i32, i64, i64 }

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)

define void @fun0(%0*) {
; CHECK-LABEL: .LBB0_4
; CHECK: => This Inner Loop Header: Depth=2
; CHECK-NOT: 16-byte Folded Spill
; CHECK-NOT: 16-byte Folded Reload
  %2 = load i64, i64* undef, align 8
  %3 = udiv i64 128, %2
  %4 = mul i64 %3, %2
  %5 = load i64, i64* undef, align 8
  switch i32 undef, label %36 [
    i32 1, label %6
    i32 2, label %7
    i32 3, label %8
    i32 4, label %9
    i32 5, label %10
    i32 6, label %11
  ]
;