Mirror of https://github.com/RPCS3/llvm-mirror.git
884e13dafb
Summary:
When splitting a load of a scalable type, the address of the second half
is calculated in SplitVecRes_LOAD using a vscale and an add instruction.

This patch also adds a DAG combiner fold to visitADD for vscale:
- Fold (add (vscale(C0)), (vscale(C1))) to (vscale(C0 + C1))

Reviewers: sdesmalen, efriedma, david-arm

Reviewed By: david-arm

Subscribers: tschuett, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D82792
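As a rough illustration (a sketch, not the verbatim patch), the combine
described above could sit in DAGCombiner::visitADD along these lines,
where N0/N1 are the operands of the add node N and VT is its result type:

    // Sketch only: assumes the standard SelectionDAG helpers
    // ISD::VSCALE, SDNode::getConstantOperandAPInt and
    // SelectionDAG::getVScale.
    // (add (vscale C0), (vscale C1)) --> (vscale (C0 + C1))
    if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) {
      const APInt &C0 = N0->getConstantOperandAPInt(0);
      const APInt &C1 = N1->getConstantOperandAPInt(0);
      return DAG.getVScale(SDLoc(N), VT, C0 + C1);
    }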
56 lines
2.0 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; LOAD

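; Promotion: nxv4i16 is promoted rather than split; it loads as unpacked
; 16-bit elements in 32-bit containers (ld1h into z0.s).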
define <vscale x 4 x i16> @load_promote_4i8(<vscale x 4 x i16>* %a) {
; CHECK-LABEL: load_promote_4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i16>, <vscale x 4 x i16>* %a
  ret <vscale x 4 x i16> %load
}

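; Splitting: nxv16i16 is twice the legal register width, so the load is
; split in two; the second half uses the vscale-scaled addressing form
; [x0, #1, mul vl].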
define <vscale x 16 x i16> @load_split_i16(<vscale x 16 x i16>* %a) {
; CHECK-LABEL: load_split_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i16>, <vscale x 16 x i16>* %a
  ret <vscale x 16 x i16> %load
}

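; Splitting: nxv32i16 splits into four legal loads.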
define <vscale x 32 x i16> @load_split_32i16(<vscale x 32 x i16>* %a) {
; CHECK-LABEL: load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 32 x i16>, <vscale x 32 x i16>* %a
  ret <vscale x 32 x i16> %load
}

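; Splitting: nxv16i64 splits into eight legal loads, filling z0-z7.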
define <vscale x 16 x i64> @load_split_16i64(<vscale x 16 x i64>* %a) {
; CHECK-LABEL: load_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ld1d { z4.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    ld1d { z5.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT:    ld1d { z6.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ld1d { z7.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i64>, <vscale x 16 x i64>* %a
  ret <vscale x 16 x i64> %load
}