mirror of https://github.com/RPCS3/llvm-mirror.git
c6e3a07bc6
The ACLE distinguishes between the following addressing modes for gather
loads:
  * "scalar base, vector offset", and
  * "vector base, scalar offset".
For the "vector base, scalar offset" case, the `int_aarch64_sve_ld1_gather_imm`
intrinsic was added in 79f2422d. Currently, that intrinsic assumes that the
scalar offset is passed as an immediate. As a result, it does not cater for
cases where the scalar offset is stored in a register.

In this patch `int_aarch64_sve_ld1_gather_imm` is extended so that all cases
are covered:
  * `int_aarch64_sve_ld1_gather_imm` is renamed as
    `int_aarch64_sve_ld1_gather_scalar_offset`
  * new DAG combine rules are added for GLD1_IMM for scenarios where the
    offset is a non-immediate scalar or an out-of-range immediate
  * sve-intrinsics-gather-loads-vector-base.ll is renamed as
    sve-intrinsics-gather-loads-vector-base-imm-offset.ll
  * sve-intrinsics-gather-loads-vector-base-scalar-offset.ll is added as a
    test file for non-immediate offsets

Similar changes are made for scatter store intrinsics.

Reviewed By: sdesmalen, efriedma

Differential Revision: https://reviews.llvm.org/D71773
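As an illustration of the newly covered case, here is a minimal sketch of a
"vector base, scalar offset" gather where the offset is a run-time value in a
register rather than an immediate. The function name @gather_reg_offset is
hypothetical; the intrinsic signature matches the declaration in the test file
below:

; Sketch only: %off lives in a register, so the GLD1_IMM addressing form
; does not apply directly and the new DAG combine rules must handle the
; lowering instead.
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

define <vscale x 2 x i64> @gather_reg_offset(<vscale x 2 x i1> %pg,
                                             <vscale x 2 x i64> %base,
                                             i64 %off) {
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           <vscale x 2 x i64> %base,
                                                                                           i64 %off)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}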
73 lines
3.8 KiB
LLVM
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; Verify that DAG combine rules for LD1 + sext/zext don't apply when the
; result of LD1 has multiple uses

define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
                                                    <vscale x 2 x i64> %base,
                                                    <vscale x 2 x i8>* %res_out,
                                                    <vscale x 2 x i1> %pred) {
; CHECK-LABEL: no_dag_combine_zext_sext
; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: st1b { z0.d }, p1, [x0]
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           <vscale x 2 x i64> %base,
                                                                                           i64 16)
  %res1 = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  %res2 = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
                                      <vscale x 2 x i8> *%res_out,
                                      i32 8,
                                      <vscale x 2 x i1> %pred)

  ret <vscale x 2 x i64> %res1
}

define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
                                               <vscale x 2 x i64> %base,
                                               <vscale x 2 x i8>* %res_out,
                                               <vscale x 2 x i1> %pred) {
; CHECK-LABEL: no_dag_combine_sext
; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
; CHECK-NEXT: st1b { z1.d }, p1, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           <vscale x 2 x i64> %base,
                                                                                           i64 16)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
                                      <vscale x 2 x i8> *%res_out,
                                      i32 8,
                                      <vscale x 2 x i1> %pred)

  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
                                               <vscale x 2 x i64> %base,
                                               <vscale x 2 x i8>* %res_out,
                                               <vscale x 2 x i1> %pred) {
; CHECK-LABEL: no_dag_combine_zext
; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT: st1b { z0.d }, p1, [x0]
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           <vscale x 2 x i64> %base,
                                                                                           i64 16)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
                                      <vscale x 2 x i8> *%res_out,
                                      i32 8,
                                      <vscale x 2 x i1> %pred)

  ret <vscale x 2 x i64> %res
}

declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)