From 2bbdc62e174451a875bd871326205e6b530d5ee2 Mon Sep 17 00:00:00 2001
From: Owen Anderson <resistor@mac.com>
Date: Fri, 22 Oct 2010 19:05:25 +0000
Subject: [PATCH] Provide correct encodings for NEON vmlal.

llvm-svn: 117131
---
 lib/Target/ARM/ARMInstrNEON.td         | 10 ++--
 test/MC/ARM/neon-mul-accum-encoding.ll | 78 ++++++++++++++++++++++++++
 2 files changed, 83 insertions(+), 5 deletions(-)

diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 0426b1145dc..80704829655 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -1447,11 +1447,11 @@ class N3VLMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                 InstrItinClass itin, string OpcodeStr, string Dt,
                 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
   : N3V<op24, op23, op21_20, op11_8, 0, op4,
-        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), N3RegFrm, itin,
-        OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
-        [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
-                                (TyQ (MulOp (TyD DPR:$src2),
-                                            (TyD DPR:$src3)))))]>;
+        (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
+        OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
+        [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
+                                (TyQ (MulOp (TyD DPR:$Vn),
+                                            (TyD DPR:$Vm)))))]>;
 class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
                   InstrItinClass itin, string OpcodeStr, string Dt,
                   ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
diff --git a/test/MC/ARM/neon-mul-accum-encoding.ll b/test/MC/ARM/neon-mul-accum-encoding.ll
index bcd4eb31d8e..891e76fd04e 100644
--- a/test/MC/ARM/neon-mul-accum-encoding.ll
+++ b/test/MC/ARM/neon-mul-accum-encoding.ll
@@ -87,3 +87,81 @@ define <4 x float> @vmla_4xfloat(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
 	%tmp5 = fadd <4 x float> %tmp1, %tmp4
 	ret <4 x float> %tmp5
 }
+
+; CHECK: vmlals_8xi8
+define <8 x i16> @vmlals_8xi8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = load <8 x i8>* %C
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
+; CHECK: vmlal.s8	q8, d19, d18    @ encoding: [0xa2,0x08,0xc3,0xf2]
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = add <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
+}
+
+; CHECK: vmlals_4xi16
+define <4 x i32> @vmlals_4xi16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i16>* %B
+	%tmp3 = load <4 x i16>* %C
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
+; CHECK: vmlal.s16	q8, d19, d18    @ encoding: [0xa2,0x08,0xd3,0xf2]
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = add <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
+}
+
+; CHECK: vmlals_2xi32
+define <2 x i64> @vmlals_2xi32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+	%tmp1 = load <2 x i64>* %A
+	%tmp2 = load <2 x i32>* %B
+	%tmp3 = load <2 x i32>* %C
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
+; CHECK: vmlal.s32	q8, d19, d18    @ encoding: [0xa2,0x08,0xe3,0xf2]
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = add <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
+}
+
+; CHECK: vmlalu_8xi8
+define <8 x i16> @vmlalu_8xi8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = load <8 x i8>* %C
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
+; CHECK: vmlal.u8	q8, d19, d18    @ encoding: [0xa2,0x08,0xc3,0xf3]
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = add <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
+}
+
+; CHECK: vmlalu_4xi16
+define <4 x i32> @vmlalu_4xi16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+	%tmp1 = load <4 x i32>* %A
+	%tmp2 = load <4 x i16>* %B
+	%tmp3 = load <4 x i16>* %C
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
+; CHECK: vmlal.u16	q8, d19, d18    @ encoding: [0xa2,0x08,0xd3,0xf3]
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = add <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
+}
+
+; CHECK: vmlalu_2xi32
+define <2 x i64> @vmlalu_2xi32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+	%tmp1 = load <2 x i64>* %A
+	%tmp2 = load <2 x i32>* %B
+	%tmp3 = load <2 x i32>* %C
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
+; CHECK: vmlal.u32	q8, d19, d18    @ encoding: [0xa2,0x08,0xe3,0xf3]
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = add <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
+}
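
For anyone cross-checking the CHECK'd byte lists: each one is the 32-bit A1
VMLAL instruction printed as little-endian bytes, so the signed/unsigned pairs
differ only in the U bit (top byte 0xf2 vs. 0xf3) and the three element sizes
only in bits 21-20. The following is a minimal decoding sketch, assuming the
A1 field layout from the ARM Architecture Reference Manual; decode_vmlal is an
illustrative helper, not an LLVM API.

    # Illustrative decoder for the A1 VMLAL encoding checked above.
    # Field layout (per the ARM ARM): bit 24 = U (0 signed, 1 unsigned),
    # bits 21-20 = size (element width is 8 << size), D:Vd = Qd,
    # N:Vn = first D-register source, M:Vm = second D-register source.
    def decode_vmlal(b0, b1, b2, b3):
        # The CHECK lines list the bytes in little-endian order.
        w = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24)
        u    = (w >> 24) & 1
        size = (w >> 20) & 3
        # Q registers are even/odd D-register pairs, so halve D:Vd.
        qd = ((((w >> 22) & 1) << 4) | ((w >> 12) & 0xF)) >> 1
        dn = (((w >> 7) & 1) << 4) | ((w >> 16) & 0xF)
        dm = (((w >> 5) & 1) << 4) | (w & 0xF)
        return "vmlal.%s%d q%d, d%d, d%d" % ("su"[u], 8 << size, qd, dn, dm)

    # First CHECK line of the new tests:
    print(decode_vmlal(0xa2, 0x08, 0xc3, 0xf2))  # -> vmlal.s8 q8, d19, d18

Feeding it the other five byte lists reproduces the remaining vmlal.s16/.s32
and vmlal.u8/.u16/.u32 lines, which makes it easy to see that only the U and
size fields vary across the six new tests.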