1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00
llvm-mirror/test/CodeGen/PowerPC/aix32-vector-vararg-callee-split.ll
Sean Fertile 5ffeb9384e [PowerPC][AIX] Handle variadic vector formal arguments.
Patch adds support for passing vector arguments to variadic functions.
Arguments which are fixed shadow GPRs and stack space even when they are
passed in vector registers, while arguments passed through ellipses are
passed in properly aligned GPRs if available and on the stack once all
GPR argument registers are consumed.

Differential Revision: https://reviews.llvm.org/D97485
2021-03-04 10:56:53 -05:00

50 lines
2.1 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
; RUN: -mcpu=pwr7 -mtriple powerpc-ibm-aix-xcoff < %s | \
; RUN: FileCheck %s
;; Testing a variadic callee where a vector argument passed through ellipsis
;; is passed partially in registers and on the stack. The 3 fixed double
;; arguments shadow r3-r8, and a vector int <4 x i32> is passed in R9/R10 and
;; on the stack starting at the shadow of R9.
; Variadic callee. The three fixed doubles shadow r3-r8, so the first variadic
; argument's home area begins at the shadow of r9. The body is a hand-expanded
; va_arg for a 16-byte-aligned <4 x i32>: va_start, round the current arg
; pointer up to a 16-byte boundary, load the vector, bump the pointer, va_end.
; The CHECK lines below are autogenerated MIR assertions; the IR value name %4
; is referenced from the CHECK line as %ir.4, so names here must not change.
define <4 x i32> @split_spill(double %d1, double %d2, double %d3, ...) {
; CHECK-LABEL: name: split_spill
; CHECK: bb.0.entry:
; CHECK: liveins: $r9, $r10
; CHECK: [[COPY:%[0-9]+]]:gprc = COPY $r10
; CHECK: [[COPY1:%[0-9]+]]:gprc = COPY $r9
; CHECK: STW [[COPY1]], 0, %fixed-stack.0 :: (store 4 into %fixed-stack.0, align 16)
; CHECK: STW [[COPY]], 4, %fixed-stack.0 :: (store 4 into %fixed-stack.0 + 4)
; CHECK: LIFETIME_START %stack.0.arg_list
; CHECK: [[ADDI:%[0-9]+]]:gprc = ADDI %fixed-stack.0, 0
; CHECK: [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero, killed [[ADDI]] :: (load 16 from %ir.4)
; CHECK: LIFETIME_END %stack.0.arg_list
; CHECK: $v2 = COPY [[LXVW4X]]
; CHECK: BLR implicit $lr, implicit $rm, implicit $v2
entry:
; On 32-bit AIX a va_list is a single i8* cursor into the argument area.
%arg_list = alloca i8*, align 4
%0 = bitcast i8** %arg_list to i8*
call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0)
call void @llvm.va_start(i8* nonnull %0)
; Read the current argument pointer, then round it up to the next 16-byte
; boundary (add 15, mask low 4 bits) to satisfy the vector's alignment.
%argp.cur = load i8*, i8** %arg_list, align 4
%1 = ptrtoint i8* %argp.cur to i32
%2 = add i32 %1, 15
%3 = and i32 %2, -16
%argp.cur.aligned = inttoptr i32 %3 to i8*
; Advance the cursor past the 16-byte vector slot and store it back so a
; subsequent va_arg would read the next argument.
%argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i32 16
store i8* %argp.next, i8** %arg_list, align 4
; Load the <4 x i32> from the aligned slot (this is %ir.4 in the CHECK above).
%4 = inttoptr i32 %3 to <4 x i32>*
%5 = load <4 x i32>, <4 x i32>* %4, align 16
call void @llvm.va_end(i8* nonnull %0)
call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0)
ret <4 x i32> %5
}
; Intrinsics used by the hand-expanded va_arg sequence above.
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
declare void @llvm.va_start(i8*)
declare void @llvm.va_end(i8*)
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)