Commit f704804dd2

Allocas in LLVM IR can carry a specified alignment; when one is given, the alloca is guaranteed at least that alignment at runtime. If the allocated type has a higher preferred alignment, SelectionDAG currently ignores the specified alignment and raises it to the preferred one, even if doing so triggers stack realignment. I don't think this makes sense, so this patch changes that.

I was looking into this for SVE in particular: for SVE, overaligning vscale'ed types is extra expensive because it requires realigning the stack multiple times, or using dynamic allocation. (This currently isn't implemented.)

I updated the expected assembly for a couple of tests. In particular, for arg-copy-elide.ll, the optimization in question does not increase the alignment the way SelectionDAG normally would. For the rest, I just increased the specified alignment on the allocas to match what SelectionDAG was inferring.

Differential Revision: https://reviews.llvm.org/D79532
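
As a minimal sketch (not part of the test file below, and using a hypothetical function name), the behaviour described above shows up on an alloca whose specified alignment is lower than the preferred alignment of its type: before this change SelectionDAG would raise the alignment to the preferred value, possibly forcing stack realignment; after it, the specified alignment is kept.

define void @align_example() {
entry:
  ; The specified alignment here is 16; on a target where the preferred
  ; alignment of <16 x float> is 64, SelectionDAG previously raised this
  ; to align 64, while with this patch it keeps align 16.
  %buf = alloca <16 x float>, align 16
  ret void
}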
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s

; rdar://12713765
; When realign-stack is set to false, make sure we are not creating stack
; objects that are assumed to be 64-byte aligned.
@T3_retval = common global <16 x float> zeroinitializer, align 16

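; The "no-realign-stack" attribute below prevents the backend from realigning
; the stack frame, so the generated code must not assume that the 64-byte
; alignment requested by the alloca is actually provided.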
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
; CHECK-LABEL: test1:
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R3:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R3:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: mov r[[R2:[0-9]+]], sp
; CHECK: add r[[R3:[0-9]+]], r[[R2]], #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R4:[0-9]+]], r[[R2]], #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R4]]:128]
; CHECK: mov r[[R5:[0-9]+]], r[[R2]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R4]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]
  %retval = alloca <16 x float>, align 64
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}

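; @test2 uses the same IR body but omits "no-realign-stack", so the backend is
; free to realign sp and rely on the alloca's 64-byte alignment.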
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
; CHECK-LABEL: test2:
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: mov r[[R1:[0-9]+]], sp
; CHECK: orr r[[R2:[0-9]+]], r[[R1]], #16
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: mov r[[R3:[0-9]+]], #32
; CHECK: mov r[[R9:[0-9]+]], r[[R1]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R9]]:128], r[[R3]]
; CHECK: mov r[[R3:[0-9]+]], r[[R9]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R9]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]

  %retval = alloca <16 x float>, align 64
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}