
[AArch64 NEON] Fix a pattern match failure with NEON_VDUP.

This failure was caused by an improper condition when lowering shuffle_vector
to scalar_to_vector. After this patch, NEON_VDUP with v1i64 will no longer be
generated.

llvm-svn: 197966
Kevin Qin 2013-12-24 08:11:47 +00:00
parent e157c50343
commit 8f86911897
3 changed files with 33 additions and 6 deletions
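
A minimal reproducer sketch of the failure mode described in the commit
message, modeled on the regression test added in the last hunk below (the
function name here is illustrative and not part of the patch). Because the
loaded i64 has a second use (the store), the LD1R pattern cannot fold the
load; before this patch the v1i64 build_vector was then lowered to NEON_VDUP,
which failed to match any pattern. After this patch it is lowered to
scalar_to_vector instead, which selects to an LDR plus an FMOV.

define <1 x i64> @dup_v1i64_with_extra_use(i64* %a, i64* %b) {
  %v = load i64* %a, align 8      ; typed-pointer load syntax of this LLVM era
  store i64 %v, i64* %b, align 8  ; second use keeps the load from being folded
  %vec = insertelement <1 x i64> undef, i64 %v, i32 0
  ret <1 x i64> %vec
}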

@@ -4070,9 +4070,7 @@ AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   if (ValueCounts.size() == 0)
     return DAG.getUNDEF(VT);
-  // Loads are better lowered with insert_vector_elt.
-  // Keep going if we are hitting this case.
-  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
+  if (isOnlyLowElement)
     return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
   unsigned EltSize = VT.getVectorElementType().getSizeInBits();

@@ -3690,12 +3690,16 @@ def : LD1R_pattern<v2f32, f32, load, LD1R_2S>;
 def : LD1R_pattern<v4i32, i32, load, LD1R_4S>;
 def : LD1R_pattern<v4f32, f32, load, LD1R_4S>;
-def : LD1R_pattern<v1i64, i64, load, LD1R_1D>;
-def : LD1R_pattern<v1f64, f64, load, LD1R_1D>;
 def : LD1R_pattern<v2i64, i64, load, LD1R_2D>;
 def : LD1R_pattern<v2f64, f64, load, LD1R_2D>;
+class LD1R_pattern_v1 <ValueType VTy, ValueType DTy, PatFrag LoadOp,
+                       Instruction INST>
+  : Pat<(VTy (scalar_to_vector (DTy (LoadOp GPR64xsp:$Rn)))),
+        (VTy (INST GPR64xsp:$Rn))>;
+def : LD1R_pattern_v1<v1i64, i64, load, LD1R_1D>;
+def : LD1R_pattern_v1<v1f64, f64, load, LD1R_1D>;
 multiclass VectorList_Bare_BHSD<string PREFIX, int Count,
                                 RegisterClass RegList> {
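
The new LD1R_pattern_v1 class matches scalar_to_vector of a load, which is
what LowerBUILD_VECTOR now emits for the v1 types, rather than a vector
duplicate. A sketch of the single-use case it is meant to cover (illustrative
function name, not one of the tests added by this commit): when the loaded
value has no other uses, the pattern should be able to fold the load and
select an LD1R_1D replicate load instead of the ldr/fmov sequence checked in
the tests below.

define <1 x i64> @dup_v1i64_single_use(i64* %a) {
  %v = load i64* %a, align 8     ; only use is the insertelement below
  %vec = insertelement <1 x i64> undef, i64 %v, i32 0
  ret <1 x i64> %vec
}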

@@ -236,6 +236,31 @@ entry:
   ret <1 x double> %1
 }
+define <1 x i64> @testDUP.v1i64(i64* %a, i64* %b) #0 {
+; As there is a store operation depending on %1, LD1R pattern can't be selected.
+; So LDR and FMOV should be emitted.
+; CHECK-LABEL: testDUP.v1i64
+; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}]
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}]
+  %1 = load i64* %a, align 8
+  store i64 %1, i64* %b, align 8
+  %vecinit.i = insertelement <1 x i64> undef, i64 %1, i32 0
+  ret <1 x i64> %vecinit.i
+}
+define <1 x double> @testDUP.v1f64(double* %a, double* %b) #0 {
+; As there is a store operation depending on %1, LD1R pattern can't be selected.
+; So LDR and FMOV should be emitted.
+; CHECK-LABEL: testDUP.v1f64
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}]
+; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}]
+  %1 = load double* %a, align 8
+  store double %1, double* %b, align 8
+  %vecinit.i = insertelement <1 x double> undef, double %1, i32 0
+  ret <1 x double> %vecinit.i
+}
 define %struct.int8x16x2_t @test_vld2q_dup_s8(i8* %a) {
 ; CHECK-LABEL: test_vld2q_dup_s8
 ; CHECK: ld2r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]