diff --git a/lib/Target/VE/VVPInstrInfo.td b/lib/Target/VE/VVPInstrInfo.td
index 81fbfe03b48..2c88d5099a7 100644
--- a/lib/Target/VE/VVPInstrInfo.td
+++ b/lib/Target/VE/VVPInstrInfo.td
@@ -40,4 +40,7 @@ class vvp_commutative<SDNode RootOp> :
 def vvp_add    : SDNode<"VEISD::VVP_ADD",  SDTIntBinOpVVP>;
 def c_vvp_add  : vvp_commutative<vvp_add>;
 
+def vvp_and    : SDNode<"VEISD::VVP_AND",  SDTIntBinOpVVP>;
+def c_vvp_and  : vvp_commutative<vvp_and>;
+
 // } Binary Operators
diff --git a/lib/Target/VE/VVPInstrPatternsVec.td b/lib/Target/VE/VVPInstrPatternsVec.td
index 2345173314a..7003fb38767 100644
--- a/lib/Target/VE/VVPInstrPatternsVec.td
+++ b/lib/Target/VE/VVPInstrPatternsVec.td
@@ -66,3 +66,6 @@ multiclass VectorBinaryArith_ShortLong<
 defm : VectorBinaryArith_ShortLong<c_vvp_add,
                                    i64, v256i64, "VADDSL",
                                    i32, v256i32, "PVADDU">;
+defm : VectorBinaryArith_ShortLong<c_vvp_and,
+                                   i64, v256i64, "VAND",
+                                   i32, v256i32, "PVAND">;
diff --git a/lib/Target/VE/VVPNodes.def b/lib/Target/VE/VVPNodes.def
index 4319b332388..1f9cbd79023 100644
--- a/lib/Target/VE/VVPNodes.def
+++ b/lib/Target/VE/VVPNodes.def
@@ -27,6 +27,7 @@
 
 // Integer arithmetic.
 ADD_BINARY_VVP_OP(VVP_ADD,ADD)
+ADD_BINARY_VVP_OP(VVP_AND,AND)
 
 #undef ADD_BINARY_VVP_OP
 #undef ADD_VVP_OP
diff --git a/test/CodeGen/VE/Vector/vec_and.ll b/test/CodeGen/VE/Vector/vec_and.ll
new file mode 100644
index 00000000000..8597e1aa511
--- /dev/null
+++ b/test/CodeGen/VE/Vector/vec_and.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+; <256 x i32>
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_vv_v256i32(<256 x i32> %x, <256 x i32> %y) {
+; CHECK-LABEL: and_vv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand.lo %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i32> %x, %y
+  ret <256 x i32> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_sv_v256i32(i32 %x, <256 x i32> %y) {
+; CHECK-LABEL: and_sv_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand.lo %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i32> undef, i32 %x, i32 0
+  %vx = shufflevector <256 x i32> %xins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i32> %vx, %y
+  ret <256 x i32> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i32> @and_vs_v256i32(<256 x i32> %x, i32 %y) {
+; CHECK-LABEL: and_vs_v256i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    pvand.lo %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i32> undef, i32 %y, i32 0
+  %vy = shufflevector <256 x i32> %yins, <256 x i32> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i32> %x, %vy
+  ret <256 x i32> %z
+}
+
+
+
+; <256 x i64>
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_vv_v256i64(<256 x i64> %x, <256 x i64> %y) {
+; CHECK-LABEL: and_vv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i64> %x, %y
+  ret <256 x i64> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_sv_v256i64(i64 %x, <256 x i64> %y) {
+; CHECK-LABEL: and_sv_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %xins = insertelement <256 x i64> undef, i64 %x, i32 0
+  %vx = shufflevector <256 x i64> %xins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i64> %vx, %y
+  ret <256 x i64> %z
+}
+
+; Function Attrs: nounwind
+define fastcc <256 x i64> @and_vs_v256i64(<256 x i64> %x, i64 %y) {
+; CHECK-LABEL: and_vs_v256i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s1, 256
+; CHECK-NEXT:    lvl %s1
+; CHECK-NEXT:    vand %v0, %s0, %v0
+; CHECK-NEXT:    b.l.t (, %s10)
+  %yins = insertelement <256 x i64> undef, i64 %y, i32 0
+  %vy = shufflevector <256 x i64> %yins, <256 x i64> undef, <256 x i32> zeroinitializer
+  %z = and <256 x i64> %x, %vy
+  ret <256 x i64> %z
+}
+
+; <128 x i64>
+; We expect this to be widened.
+
+; Function Attrs: nounwind
+define fastcc <128 x i64> @and_vv_v128i64(<128 x i64> %x, <128 x i64> %y) {
+; CHECK-LABEL: and_vv_v128i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    vand %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <128 x i64> %x, %y
+  ret <128 x i64> %z
+}
+
+; <256 x i16>
+; We expect promotion.
+
+; Function Attrs: nounwind
+define fastcc <256 x i16> @and_vv_v256i16(<256 x i16> %x, <256 x i16> %y) {
+; CHECK-LABEL: and_vv_v256i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, 256
+; CHECK-NEXT:    lvl %s0
+; CHECK-NEXT:    pvand.lo %v0, %v0, %v1
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = and <256 x i16> %x, %y
+  ret <256 x i16> %z
+}
+
+; <128 x i16>
+; We expect this to be scalarized (for now).
+
+; Function Attrs: nounwind
+define fastcc <128 x i16> @and_vv_v128i16(<128 x i16> %x, <128 x i16> %y) {
+; CHECK-LABEL: and_vv_v128i16:
+; CHECK-NOT: vand
+  %z = and <128 x i16> %x, %y
+  ret <128 x i16> %z
+}
+