[VE] Update VELIntrinsic tests
Update comment and style of regression tests for VELIntrinsic

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D94490
parent 0c7bd7a98b
commit c563794bd1
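For context while reading the diff below: the style cleanup switches the tests' inline-asm vector stores from a plain $0 operand to the explicit ${0:v} vector-register modifier, reordering the asm arguments to match. A minimal sketch of the before/after pattern, where the %v, %base, and %stride names are illustrative and not taken from the tests:

  ; old style: plain $0, arguments passed as (value, stride, base)
  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %v, i64 %stride, i8* %base)
  ; new style: ${0:v} modifier, arguments passed as (value, base, stride);
  ; both forms emit the same "vst <vreg>, <stride>, <base>" instruction
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %v, i8* %base, i64 %stride)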
@@ -4,8 +4,8 @@
 ;;;
 ;;; Note:
 ;;; We test VAND*vvl, VAND*vvl_v, VAND*rvl, VAND*rvl_v, VAND*vvml_v,
-;;; VAND*rvml_v, PVAND*vvl, PVAND*vvl_v, PVAND*rvl, PVAND*rvl_v, PVAND*vvml_v, and
-;;; PVAND*rvml_v instructions.
+;;; VAND*rvml_v, PVAND*vvl, PVAND*vvl_v, PVAND*rvl, PVAND*rvl_v, PVAND*vvml_v,
+;;; and PVAND*rvml_v instructions.
 
 ; Function Attrs: nounwind readnone
 define fastcc <256 x double> @vand_vvvl(<256 x double> %0, <256 x double> %1) {
@@ -3,7 +3,7 @@
 ;;; Test vector load intrinsic instructions
 ;;;
 ;;; Note:
-;;; We test VLD*rrl, VLD*irl, VLD*rrl_v, VLD*irl_v
+;;; We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.
 
 ; Function Attrs: nounwind
 define void @vld_vssl(i8* %0, i64 %1) {
@@ -17,7 +17,7 @@ define void @vld_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -38,7 +38,7 @@ define void @vld_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -57,7 +57,7 @@ define void @vld_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -75,7 +75,7 @@ define void @vld_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -91,7 +91,7 @@ define void @vldnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -112,7 +112,7 @@ define void @vldnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -131,7 +131,7 @@ define void @vldnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -149,7 +149,7 @@ define void @vldnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -165,7 +165,7 @@ define void @vldu_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -186,7 +186,7 @@ define void @vldu_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -205,7 +205,7 @@ define void @vldu_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -223,7 +223,7 @@ define void @vldu_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -239,7 +239,7 @@ define void @vldunc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -260,7 +260,7 @@ define void @vldunc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -279,7 +279,7 @@ define void @vldunc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -297,7 +297,7 @@ define void @vldunc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -313,7 +313,7 @@ define void @vldlsx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -334,7 +334,7 @@ define void @vldlsx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -353,7 +353,7 @@ define void @vldlsx_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -371,7 +371,7 @@ define void @vldlsx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -387,7 +387,7 @@ define void @vldlsxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -408,7 +408,7 @@ define void @vldlsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -427,7 +427,7 @@ define void @vldlsxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -445,7 +445,7 @@ define void @vldlsxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -461,7 +461,7 @@ define void @vldlzx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -482,7 +482,7 @@ define void @vldlzx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -501,7 +501,7 @@ define void @vldlzx_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -519,7 +519,7 @@ define void @vldlzx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -535,7 +535,7 @@ define void @vldlzxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -556,7 +556,7 @@ define void @vldlzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -575,7 +575,7 @@ define void @vldlzxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -593,7 +593,7 @@ define void @vldlzxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -609,7 +609,7 @@ define void @vld2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -630,7 +630,7 @@ define void @vld2d_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -649,7 +649,7 @@ define void @vld2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -667,7 +667,7 @@ define void @vld2d_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -683,7 +683,7 @@ define void @vld2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -704,7 +704,7 @@ define void @vld2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -723,7 +723,7 @@ define void @vld2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -741,7 +741,7 @@ define void @vld2dnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -757,7 +757,7 @@ define void @vldu2d_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -778,7 +778,7 @@ define void @vldu2d_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -797,7 +797,7 @@ define void @vldu2d_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -815,7 +815,7 @@ define void @vldu2d_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -831,7 +831,7 @@ define void @vldu2dnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -852,7 +852,7 @@ define void @vldu2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -871,7 +871,7 @@ define void @vldu2dnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -889,7 +889,7 @@ define void @vldu2dnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -905,7 +905,7 @@ define void @vldl2dsx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -926,7 +926,7 @@ define void @vldl2dsx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -945,7 +945,7 @@ define void @vldl2dsx_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -963,7 +963,7 @@ define void @vldl2dsx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -979,7 +979,7 @@ define void @vldl2dsxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -1000,7 +1000,7 @@ define void @vldl2dsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -1019,7 +1019,7 @@ define void @vldl2dsxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -1037,7 +1037,7 @@ define void @vldl2dsxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -1053,7 +1053,7 @@ define void @vldl2dzx_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -1074,7 +1074,7 @@ define void @vldl2dzx_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -1093,7 +1093,7 @@ define void @vldl2dzx_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -1111,7 +1111,7 @@ define void @vldl2dzx_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }
 
@@ -1127,7 +1127,7 @@ define void @vldl2dzxnc_vssl(i8* %0, i64 %1) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %3, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
   ret void
 }
 
@@ -1148,7 +1148,7 @@ define void @vldl2dzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %2, i32 256)
   %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
-  tail call void asm sideeffect "vst $0, $1, $2", "v,r,r"(<256 x double> %5, i64 %1, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
   ret void
 }
 
@@ -1167,7 +1167,7 @@ define void @vldl2dzxnc_vssl_imm(i8* %0) {
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: b.l.t (, %s10)
   %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %0, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %2, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
   ret void
 }
 
@@ -1185,6 +1185,6 @@ define void @vldl2dzxnc_vssvl_imm(i8* %0, i8* %1) {
 ; CHECK-NEXT: b.l.t (, %s10)
   %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %1, i32 256)
   %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
-  tail call void asm sideeffect "vst $0, 8, $1", "v,r"(<256 x double> %4, i8* %0)
+  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
   ret void
 }