mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 20:23:11 +01:00
cc12b285b6
This will currently accept the old number of bytes syntax, and convert it to a scalar. This should be removed in the near future (I think I converted all of the tests already, but likely missed a few). Not sure what the exact syntax and policy should be. We can continue printing the number of bytes for non-generic instructions to avoid test churn and only allow non-scalar types for generic instructions. This will currently print the LLT in parentheses, but accept parsing the existing integers and implicitly converting to scalar. The parentheses are a bit ugly, but the parser logic seems unable to deal without either parentheses or some keyword to indicate the start of a type.
61 lines
2.7 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -verify-machineinstrs -stop-before=ppc-vsx-copy -vec-extabi \
; RUN:   -mcpu=pwr7 -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN:   FileCheck %s

; Testing passing a vector <4 x i32> through ellipses of a variadic function.
define <4 x i32> @callee(i32 signext %count, ...) {
; CHECK-LABEL: name: callee
; CHECK: bb.0.entry:
; CHECK: liveins: $x4, $x5, $x6, $x7, $x8, $x9, $x10
; CHECK: [[COPY:%[0-9]+]]:g8rc = COPY $x10
; CHECK: [[COPY1:%[0-9]+]]:g8rc = COPY $x9
; CHECK: [[COPY2:%[0-9]+]]:g8rc = COPY $x8
; CHECK: [[COPY3:%[0-9]+]]:g8rc = COPY $x7
; CHECK: [[COPY4:%[0-9]+]]:g8rc = COPY $x6
; CHECK: [[COPY5:%[0-9]+]]:g8rc = COPY $x5
; CHECK: [[COPY6:%[0-9]+]]:g8rc = COPY $x4
; CHECK: STD [[COPY6]], 0, %fixed-stack.0 :: (store (s64) into %fixed-stack.0)
; CHECK: STD [[COPY5]], 8, %fixed-stack.0 :: (store (s64) into %fixed-stack.0 + 8)
; CHECK: STD [[COPY4]], 16, %fixed-stack.0 :: (store (s64))
; CHECK: STD [[COPY3]], 24, %fixed-stack.0 :: (store (s64))
; CHECK: STD [[COPY2]], 32, %fixed-stack.0 :: (store (s64))
; CHECK: STD [[COPY1]], 40, %fixed-stack.0 :: (store (s64))
; CHECK: STD [[COPY]], 48, %fixed-stack.0 :: (store (s64))
; CHECK: LIFETIME_START %stack.0.arg_list
; CHECK: [[ADDI8_:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 0
; CHECK: STD killed [[ADDI8_]], 0, %stack.0.arg_list :: (store (s64) into %ir.0)
; CHECK: [[ADDI8_1:%[0-9]+]]:g8rc = ADDI8 %fixed-stack.0, 15
; CHECK: [[RLDICR:%[0-9]+]]:g8rc = RLDICR killed [[ADDI8_1]], 0, 59
; CHECK: [[LXVW4X:%[0-9]+]]:vsrc = LXVW4X $zero8, killed [[RLDICR]] :: (load (s128) from %ir.4)
; CHECK: LIFETIME_END %stack.0.arg_list
; CHECK: $v2 = COPY [[LXVW4X]]
; CHECK: BLR8 implicit $lr8, implicit $rm, implicit $v2
entry:
  ; Open-coded va_arg: read the current arg pointer, round it up to a
  ; 16-byte boundary (add 15, mask with -16), load the <4 x i32> from the
  ; aligned address, then bump the pointer past the 16-byte slot.
  %arg_list = alloca i8*, align 8
  %0 = bitcast i8** %arg_list to i8*
  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
  call void @llvm.va_start(i8* nonnull %0)
  %argp.cur = load i8*, i8** %arg_list, align 8
  %1 = ptrtoint i8* %argp.cur to i64
  %2 = add i64 %1, 15
  %3 = and i64 %2, -16
  %argp.cur.aligned = inttoptr i64 %3 to i8*
  %argp.next = getelementptr inbounds i8, i8* %argp.cur.aligned, i64 16
  store i8* %argp.next, i8** %arg_list, align 8
  %4 = inttoptr i64 %3 to <4 x i32>*
  %5 = load <4 x i32>, <4 x i32>* %4, align 16
  call void @llvm.va_end(i8* nonnull %0)
  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
  ret <4 x i32> %5
}
; Intrinsic declarations used by @callee's open-coded va_arg sequence.
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)

declare void @llvm.va_start(i8*)

declare void @llvm.va_end(i8*)

declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)