; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s | FileCheck %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
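
; Test lowering of unaligned (under-aligned) vector loads and stores.
; Attribute #0 selects pwr7 (VSX), attribute #1 selects a2q (QPX), and
; attribute #2 selects pwr8.

; On pwr7, unaligned loads of byte-, halfword-, word- and float-element
; vectors are assembled with lvsl/lvx/vperm; doubleword-element vectors are
; loaded directly with lxvd2x.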
define <16 x i8> @test_l_v16i8(<16 x i8>* %p) #0 {
; CHECK-LABEL: test_l_v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: lvsl 3, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: lvx 4, 0, 3
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
%r = load <16 x i8>, <16 x i8>* %p, align 1
ret <16 x i8> %r
}
define <32 x i8> @test_l_v32i8(<32 x i8>* %p) #0 {
; CHECK-LABEL: test_l_v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: lvsl 5, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lvx 4, 3, 4
; CHECK-NEXT: lvx 0, 0, 3
; CHECK-NEXT: vperm 3, 4, 2, 5
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
%r = load <32 x i8>, <32 x i8>* %p, align 1
ret <32 x i8> %r
}
define <8 x i16> @test_l_v8i16(<8 x i16>* %p) #0 {
; CHECK-LABEL: test_l_v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: lvsl 3, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: lvx 4, 0, 3
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
%r = load <8 x i16>, <8 x i16>* %p, align 2
ret <8 x i16> %r
}
define <16 x i16> @test_l_v16i16(<16 x i16>* %p) #0 {
; CHECK-LABEL: test_l_v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: lvsl 5, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lvx 4, 3, 4
; CHECK-NEXT: lvx 0, 0, 3
; CHECK-NEXT: vperm 3, 4, 2, 5
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
%r = load <16 x i16>, <16 x i16>* %p, align 2
ret <16 x i16> %r
}
define <4 x i32> @test_l_v4i32(<4 x i32>* %p) #0 {
; CHECK-LABEL: test_l_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: lvsl 3, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: lvx 4, 0, 3
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
%r = load <4 x i32>, <4 x i32>* %p, align 4
ret <4 x i32> %r
}
define <8 x i32> @test_l_v8i32(<8 x i32>* %p) #0 {
; CHECK-LABEL: test_l_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: lvsl 5, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lvx 4, 3, 4
; CHECK-NEXT: lvx 0, 0, 3
; CHECK-NEXT: vperm 3, 4, 2, 5
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
%r = load <8 x i32>, <8 x i32>* %p, align 4
ret <8 x i32> %r
}
define <2 x i64> @test_l_v2i64(<2 x i64>* %p) #0 {
; CHECK-LABEL: test_l_v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <2 x i64>, <2 x i64>* %p, align 8
ret <2 x i64> %r
}
define <4 x i64> @test_l_v4i64(<4 x i64>* %p) #0 {
; CHECK-LABEL: test_l_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <4 x i64>, <4 x i64>* %p, align 8
ret <4 x i64> %r
}
define <4 x float> @test_l_v4float(<4 x float>* %p) #0 {
; CHECK-LABEL: test_l_v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: lvsl 3, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: lvx 4, 0, 3
; CHECK-NEXT: vperm 2, 4, 2, 3
; CHECK-NEXT: blr
entry:
%r = load <4 x float>, <4 x float>* %p, align 4
ret <4 x float> %r
}
define <8 x float> @test_l_v8float(<8 x float>* %p) #0 {
; CHECK-LABEL: test_l_v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: lvsl 5, 0, 3
; CHECK-NEXT: lvx 2, 3, 4
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lvx 4, 3, 4
; CHECK-NEXT: lvx 0, 0, 3
; CHECK-NEXT: vperm 3, 4, 2, 5
; CHECK-NEXT: vperm 2, 0, 4, 5
; CHECK-NEXT: blr
entry:
%r = load <8 x float>, <8 x float>* %p, align 4
ret <8 x float> %r
}
define <2 x double> @test_l_v2double(<2 x double>* %p) #0 {
; CHECK-LABEL: test_l_v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <2 x double>, <2 x double>* %p, align 8
ret <2 x double> %r
}
define <4 x double> @test_l_v4double(<4 x double>* %p) #0 {
; CHECK-LABEL: test_l_v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <4 x double>, <4 x double>* %p, align 8
ret <4 x double> %r
}
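
; On pwr8, unaligned vector loads are emitted directly as lxvw4x/lxvd2x.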
define <16 x i8> @test_l_p8v16i8(<16 x i8>* %p) #2 {
; CHECK-LABEL: test_l_p8v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <16 x i8>, <16 x i8>* %p, align 1
ret <16 x i8> %r
}
define <32 x i8> @test_l_p8v32i8(<32 x i8>* %p) #2 {
; CHECK-LABEL: test_l_p8v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <32 x i8>, <32 x i8>* %p, align 1
ret <32 x i8> %r
}
define <8 x i16> @test_l_p8v8i16(<8 x i16>* %p) #2 {
; CHECK-LABEL: test_l_p8v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <8 x i16>, <8 x i16>* %p, align 2
ret <8 x i16> %r
}
define <16 x i16> @test_l_p8v16i16(<16 x i16>* %p) #2 {
; CHECK-LABEL: test_l_p8v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <16 x i16>, <16 x i16>* %p, align 2
ret <16 x i16> %r
}
define <4 x i32> @test_l_p8v4i32(<4 x i32>* %p) #2 {
; CHECK-LABEL: test_l_p8v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <4 x i32>, <4 x i32>* %p, align 4
ret <4 x i32> %r
}
define <8 x i32> @test_l_p8v8i32(<8 x i32>* %p) #2 {
; CHECK-LABEL: test_l_p8v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <8 x i32>, <8 x i32>* %p, align 4
ret <8 x i32> %r
}
define <2 x i64> @test_l_p8v2i64(<2 x i64>* %p) #2 {
; CHECK-LABEL: test_l_p8v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <2 x i64>, <2 x i64>* %p, align 8
ret <2 x i64> %r
}
define <4 x i64> @test_l_p8v4i64(<4 x i64>* %p) #2 {
; CHECK-LABEL: test_l_p8v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <4 x i64>, <4 x i64>* %p, align 8
ret <4 x i64> %r
}
define <4 x float> @test_l_p8v4float(<4 x float>* %p) #2 {
; CHECK-LABEL: test_l_p8v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <4 x float>, <4 x float>* %p, align 4
ret <4 x float> %r
}
define <8 x float> @test_l_p8v8float(<8 x float>* %p) #2 {
; CHECK-LABEL: test_l_p8v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: lxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <8 x float>, <8 x float>* %p, align 4
ret <8 x float> %r
}
define <2 x double> @test_l_p8v2double(<2 x double>* %p) #2 {
; CHECK-LABEL: test_l_p8v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
%r = load <2 x double>, <2 x double>* %p, align 8
ret <2 x double> %r
}
define <4 x double> @test_l_p8v4double(<4 x double>* %p) #2 {
; CHECK-LABEL: test_l_p8v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: lxvd2x 34, 0, 3
; CHECK-NEXT: lxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
%r = load <4 x double>, <4 x double>* %p, align 8
ret <4 x double> %r
}
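
; QPX (a2q) unaligned loads use qvlpclsx/qvlpcldx, qvlfsx/qvlfdx and qvfperm,
; analogous to the Altivec lvsl/lvx/vperm sequence above.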
define <4 x float> @test_l_qv4float(<4 x float>* %p) #1 {
; CHECK-LABEL: test_l_qv4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 15
; CHECK-NEXT: qvlpclsx 0, 0, 3
; CHECK-NEXT: qvlfsx 1, 3, 4
; CHECK-NEXT: qvlfsx 2, 0, 3
; CHECK-NEXT: qvfperm 1, 2, 1, 0
; CHECK-NEXT: blr
entry:
%r = load <4 x float>, <4 x float>* %p, align 4
ret <4 x float> %r
}
define <8 x float> @test_l_qv8float(<8 x float>* %p) #1 {
; CHECK-LABEL: test_l_qv8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: qvlpclsx 1, 0, 3
; CHECK-NEXT: qvlfsx 0, 3, 4
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: qvlfsx 3, 3, 4
; CHECK-NEXT: qvlfsx 4, 0, 3
; CHECK-NEXT: qvfperm 2, 3, 0, 1
; CHECK-NEXT: qvfperm 1, 4, 3, 1
; CHECK-NEXT: blr
entry:
%r = load <8 x float>, <8 x float>* %p, align 4
ret <8 x float> %r
}
define <4 x double> @test_l_qv4double(<4 x double>* %p) #1 {
; CHECK-LABEL: test_l_qv4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 31
; CHECK-NEXT: qvlpcldx 0, 0, 3
; CHECK-NEXT: qvlfdx 1, 3, 4
; CHECK-NEXT: qvlfdx 2, 0, 3
; CHECK-NEXT: qvfperm 1, 2, 1, 0
; CHECK-NEXT: blr
entry:
%r = load <4 x double>, <4 x double>* %p, align 8
ret <4 x double> %r
}
define <8 x double> @test_l_qv8double(<8 x double>* %p) #1 {
; CHECK-LABEL: test_l_qv8double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 63
; CHECK-NEXT: qvlpcldx 1, 0, 3
; CHECK-NEXT: qvlfdx 0, 3, 4
; CHECK-NEXT: li 4, 32
; CHECK-NEXT: qvlfdx 3, 3, 4
; CHECK-NEXT: qvlfdx 4, 0, 3
; CHECK-NEXT: qvfperm 2, 3, 0, 1
; CHECK-NEXT: qvfperm 1, 4, 3, 1
; CHECK-NEXT: blr
entry:
%r = load <8 x double>, <8 x double>* %p, align 8
ret <8 x double> %r
}
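
; Unaligned vector stores on pwr7 are emitted directly as stxvw4x/stxvd2x.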
define void @test_s_v16i8(<16 x i8>* %p, <16 x i8> %v) #0 {
; CHECK-LABEL: test_s_v16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <16 x i8> %v, <16 x i8>* %p, align 1
ret void
}
define void @test_s_v32i8(<32 x i8>* %p, <32 x i8> %v) #0 {
; CHECK-LABEL: test_s_v32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <32 x i8> %v, <32 x i8>* %p, align 1
ret void
}
define void @test_s_v8i16(<8 x i16>* %p, <8 x i16> %v) #0 {
; CHECK-LABEL: test_s_v8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <8 x i16> %v, <8 x i16>* %p, align 2
ret void
}
define void @test_s_v16i16(<16 x i16>* %p, <16 x i16> %v) #0 {
; CHECK-LABEL: test_s_v16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <16 x i16> %v, <16 x i16>* %p, align 2
ret void
}
define void @test_s_v4i32(<4 x i32>* %p, <4 x i32> %v) #0 {
; CHECK-LABEL: test_s_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <4 x i32> %v, <4 x i32>* %p, align 4
ret void
}
define void @test_s_v8i32(<8 x i32>* %p, <8 x i32> %v) #0 {
; CHECK-LABEL: test_s_v8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <8 x i32> %v, <8 x i32>* %p, align 4
ret void
}
define void @test_s_v2i64(<2 x i64>* %p, <2 x i64> %v) #0 {
; CHECK-LABEL: test_s_v2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <2 x i64> %v, <2 x i64>* %p, align 8
ret void
}
define void @test_s_v4i64(<4 x i64>* %p, <4 x i64> %v) #0 {
; CHECK-LABEL: test_s_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: stxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <4 x i64> %v, <4 x i64>* %p, align 8
ret void
}
define void @test_s_v4float(<4 x float>* %p, <4 x float> %v) #0 {
; CHECK-LABEL: test_s_v4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <4 x float> %v, <4 x float>* %p, align 4
ret void
}
define void @test_s_v8float(<8 x float>* %p, <8 x float> %v) #0 {
; CHECK-LABEL: test_s_v8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: stxvw4x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <8 x float> %v, <8 x float>* %p, align 4
ret void
}
define void @test_s_v2double(<2 x double>* %p, <2 x double> %v) #0 {
; CHECK-LABEL: test_s_v2double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: blr
entry:
store <2 x double> %v, <2 x double>* %p, align 8
ret void
}
define void @test_s_v4double(<4 x double>* %p, <4 x double> %v) #0 {
; CHECK-LABEL: test_s_v4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, 16
; CHECK-NEXT: stxvd2x 34, 0, 3
; CHECK-NEXT: stxvd2x 35, 3, 4
; CHECK-NEXT: blr
entry:
store <4 x double> %v, <4 x double>* %p, align 8
ret void
}
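
; QPX unaligned stores are scalarized: elements are extracted with qvesplati
; and stored individually with stfs/stfd.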
define void @test_s_qv4float(<4 x float>* %p, <4 x float> %v) #1 {
; CHECK-LABEL: test_s_qv4float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: qvesplati 0, 1, 3
; CHECK-NEXT: stfs 1, 0(3)
; CHECK-NEXT: stfs 0, 12(3)
; CHECK-NEXT: qvesplati 0, 1, 2
; CHECK-NEXT: qvesplati 1, 1, 1
; CHECK-NEXT: stfs 0, 8(3)
; CHECK-NEXT: stfs 1, 4(3)
; CHECK-NEXT: blr
entry:
store <4 x float> %v, <4 x float>* %p, align 4
ret void
}
define void @test_s_qv8float(<8 x float>* %p, <8 x float> %v) #1 {
; CHECK-LABEL: test_s_qv8float:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: qvesplati 0, 2, 3
; CHECK-NEXT: stfs 2, 16(3)
; CHECK-NEXT: stfs 0, 28(3)
; CHECK-NEXT: qvesplati 0, 2, 2
; CHECK-NEXT: qvesplati 2, 2, 1
; CHECK-NEXT: stfs 1, 0(3)
; CHECK-NEXT: stfs 0, 24(3)
; CHECK-NEXT: qvesplati 0, 1, 3
; CHECK-NEXT: stfs 2, 20(3)
; CHECK-NEXT: qvesplati 2, 1, 2
; CHECK-NEXT: qvesplati 1, 1, 1
; CHECK-NEXT: stfs 0, 12(3)
; CHECK-NEXT: stfs 2, 8(3)
; CHECK-NEXT: stfs 1, 4(3)
; CHECK-NEXT: blr
entry:
store <8 x float> %v, <8 x float>* %p, align 4
ret void
}
define void @test_s_qv4double(<4 x double>* %p, <4 x double> %v) #1 {
; CHECK-LABEL: test_s_qv4double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: qvesplati 0, 1, 3
; CHECK-NEXT: stfd 1, 0(3)
; CHECK-NEXT: stfd 0, 24(3)
; CHECK-NEXT: qvesplati 0, 1, 2
; CHECK-NEXT: qvesplati 1, 1, 1
; CHECK-NEXT: stfd 0, 16(3)
; CHECK-NEXT: stfd 1, 8(3)
; CHECK-NEXT: blr
entry:
store <4 x double> %v, <4 x double>* %p, align 8
ret void
}
define void @test_s_qv8double(<8 x double>* %p, <8 x double> %v) #1 {
; CHECK-LABEL: test_s_qv8double:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: qvesplati 0, 2, 3
; CHECK-NEXT: stfd 2, 32(3)
; CHECK-NEXT: stfd 0, 56(3)
; CHECK-NEXT: qvesplati 0, 2, 2
; CHECK-NEXT: qvesplati 2, 2, 1
; CHECK-NEXT: stfd 1, 0(3)
; CHECK-NEXT: stfd 0, 48(3)
; CHECK-NEXT: qvesplati 0, 1, 3
; CHECK-NEXT: stfd 2, 40(3)
; CHECK-NEXT: qvesplati 2, 1, 2
; CHECK-NEXT: qvesplati 1, 1, 1
; CHECK-NEXT: stfd 0, 24(3)
; CHECK-NEXT: stfd 2, 16(3)
; CHECK-NEXT: stfd 1, 8(3)
; CHECK-NEXT: blr
entry:
store <8 x double> %v, <8 x double>* %p, align 8
ret void
}
attributes #0 = { nounwind "target-cpu"="pwr7" }
attributes #1 = { nounwind "target-cpu"="a2q" }
attributes #2 = { nounwind "target-cpu"="pwr8" }