Mirror of https://github.com/RPCS3/llvm-mirror.git
[SVE][CodeGen] Fix bug in DAGCombiner::reduceBuildVecToShuffle
When trying to reduce a BUILD_VECTOR to a SHUFFLE_VECTOR it's important that we carefully check the vector types that led to that BUILD_VECTOR. In the test attached to this commit, the results of two SVE faddv instructions are stored to consecutive memory locations. With my fix, as part of merging those stores we discover that each BUILD_VECTOR element came from an extract of an SVE vector element and therefore bail out of the combine.

Differential Revision: https://reviews.llvm.org/D82564
parent 986143ca63
commit 4bd32c5a01
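For intuition before the diff itself, here is a purely hypothetical, fixed-width IR sketch (not part of this commit; the function and value names are invented) of the rewrite reduceBuildVecToShuffle aims for. When the extracted lanes come from a fixed-length vector, the extract/insert chain can be replaced by a shufflevector with a constant mask; when the source is a scalable type such as <vscale x 2 x double>, no fixed-length mask can describe the same lane selection, so the combine has to give up.

; Hypothetical fixed-width example: this extract/insert chain is the IR-level
; analogue of a BUILD_VECTOR whose elements are EXTRACT_VECTOR_ELT nodes ...
define <2 x double> @build_from_extracts(<4 x double> %v) {
  %a = extractelement <4 x double> %v, i64 0
  %b = extractelement <4 x double> %v, i64 1
  %t = insertelement <2 x double> undef, double %a, i64 0
  %r = insertelement <2 x double> %t, double %b, i64 1
  ret <2 x double> %r
}

; ... and can be rewritten as a shuffle with a constant mask. With a
; <vscale x 2 x double> source no such constant-length mask exists, which is
; why the combine below now bails out for scalable vectors.
define <2 x double> @as_shuffle(<4 x double> %v) {
  %r = shufflevector <4 x double> %v, <4 x double> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %r
}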
lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18287,6 +18287,9 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
       return SDValue();
     SDValue ExtractedFromVec = Op.getOperand(0);
 
+    if (ExtractedFromVec.getValueType().isScalableVector())
+      return SDValue();
+
     const APInt &ExtractIdx = Op.getConstantOperandAPInt(1);
     if (ExtractIdx.uge(ExtractedFromVec.getValueType().getVectorNumElements()))
       return SDValue();
test/CodeGen/AArch64/sve-merging-stores.ll (new file, 32 lines)
@@ -0,0 +1,32 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+%complex = type { { double, double } }
+
+; Function Attrs: argmemonly nounwind readonly
+declare <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double>, i32 immarg) #3
+
+; Function Attrs: argmemonly nounwind readonly
+declare <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(<vscale x 2 x i1>, double*) #3
+
+; Function Attrs: nounwind readnone
+declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>) #2
+
+define void @foo1(%complex* %outval, <vscale x 2 x i1> %pred, double *%inptr) {
+; CHECK-LABEL: foo1:
+; CHECK: ld2d { z0.d, z1.d }, p0/z, [x1]
+; CHECK-NEXT: faddv d2, p0, z0.d
+; CHECK-NEXT: faddv d0, p0, z1.d
+; CHECK-NEXT: mov v2.d[1], v0.d[0]
+; CHECK-NEXT: str q2, [x0]
+  %realp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 0
+  %imagp = getelementptr inbounds %complex, %complex* %outval, i64 0, i32 0, i32 1
+  %1 = call <vscale x 4 x double> @llvm.aarch64.sve.ld2.nxv4f64.nxv2i1(<vscale x 2 x i1> %pred, double* nonnull %inptr)
+  %2 = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %1, i32 0)
+  %3 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %2)
+  %4 = call <vscale x 2 x double> @llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64(<vscale x 4 x double> %1, i32 1)
+  %5 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %4)
+  store double %3, double* %realp, align 8
+  store double %5, double* %imagp, align 8
+  ret void
+}
+