diff --git a/include/llvm/IR/IntrinsicsPowerPC.td b/include/llvm/IR/IntrinsicsPowerPC.td index 34ef4b768e3..dc7fa581fd4 100644 --- a/include/llvm/IR/IntrinsicsPowerPC.td +++ b/include/llvm/IR/IntrinsicsPowerPC.td @@ -481,6 +481,18 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.". Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>; + // P10 Move to VSR with Mask Intrinsics. + def int_ppc_altivec_mtvsrbm : GCCBuiltin<"__builtin_altivec_mtvsrbm">, + Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty], [IntrNoMem]>; + def int_ppc_altivec_mtvsrhm : GCCBuiltin<"__builtin_altivec_mtvsrhm">, + Intrinsic<[llvm_v8i16_ty], [llvm_i64_ty], [IntrNoMem]>; + def int_ppc_altivec_mtvsrwm : GCCBuiltin<"__builtin_altivec_mtvsrwm">, + Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty], [IntrNoMem]>; + def int_ppc_altivec_mtvsrdm : GCCBuiltin<"__builtin_altivec_mtvsrdm">, + Intrinsic<[llvm_v2i64_ty], [llvm_i64_ty], [IntrNoMem]>; + def int_ppc_altivec_mtvsrqm : GCCBuiltin<"__builtin_altivec_mtvsrqm">, + Intrinsic<[llvm_v1i128_ty], [llvm_i64_ty], [IntrNoMem]>; + // P10 Vector Parallel Bits Deposit/Extract Doubleword Builtins. 
def int_ppc_altivec_vpdepd : GCCBuiltin<"__builtin_altivec_vpdepd">, Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], diff --git a/lib/Target/PowerPC/PPCInstrPrefix.td b/lib/Target/PowerPC/PPCInstrPrefix.td index 553bcdea9bc..23b641f00cc 100644 --- a/lib/Target/PowerPC/PPCInstrPrefix.td +++ b/lib/Target/PowerPC/PPCInstrPrefix.td @@ -1054,22 +1054,28 @@ let Predicates = [IsISA3_1] in { v1i128:$vB))]>; def MTVSRBM : VXForm_RD5_XO5_RS5<1602, 16, (outs vrrc:$vD), (ins g8rc:$rB), "mtvsrbm $vD, $rB", IIC_VecGeneral, - []>; + [(set v16i8:$vD, + (int_ppc_altivec_mtvsrbm i64:$rB))]>; def MTVSRHM : VXForm_RD5_XO5_RS5<1602, 17, (outs vrrc:$vD), (ins g8rc:$rB), "mtvsrhm $vD, $rB", IIC_VecGeneral, - []>; + [(set v8i16:$vD, + (int_ppc_altivec_mtvsrhm i64:$rB))]>; def MTVSRWM : VXForm_RD5_XO5_RS5<1602, 18, (outs vrrc:$vD), (ins g8rc:$rB), "mtvsrwm $vD, $rB", IIC_VecGeneral, - []>; + [(set v4i32:$vD, + (int_ppc_altivec_mtvsrwm i64:$rB))]>; def MTVSRDM : VXForm_RD5_XO5_RS5<1602, 19, (outs vrrc:$vD), (ins g8rc:$rB), "mtvsrdm $vD, $rB", IIC_VecGeneral, - []>; + [(set v2i64:$vD, + (int_ppc_altivec_mtvsrdm i64:$rB))]>; def MTVSRQM : VXForm_RD5_XO5_RS5<1602, 20, (outs vrrc:$vD), (ins g8rc:$rB), "mtvsrqm $vD, $rB", IIC_VecGeneral, - []>; + [(set v1i128:$vD, + (int_ppc_altivec_mtvsrqm i64:$rB))]>; def MTVSRBMI : DXForm<4, 10, (outs vrrc:$vD), (ins u16imm64:$D), "mtvsrbmi $vD, $D", IIC_VecGeneral, - []>; + [(set v16i8:$vD, + (int_ppc_altivec_mtvsrbm imm:$D))]>; def VCNTMBB : VXForm_RD5_MP_VB5<1602, 12, (outs g8rc:$rD), (ins vrrc:$vB, u1imm:$MP), "vcntmbb $rD, $vB, $MP", IIC_VecGeneral, diff --git a/test/CodeGen/PowerPC/p10-vector-mask-ops.ll b/test/CodeGen/PowerPC/p10-vector-mask-ops.ll index 65e9abd657a..c93f7efe8fd 100644 --- a/test/CodeGen/PowerPC/p10-vector-mask-ops.ll +++ b/test/CodeGen/PowerPC/p10-vector-mask-ops.ll @@ -165,3 +165,109 @@ entry: %cnt = tail call i64 @llvm.ppc.altivec.vcntmbd(<2 x i64> %a, i32 0) ret i64 %cnt } + +declare <16 x i8> 
@llvm.ppc.altivec.mtvsrbm(i64) +declare <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64) +declare <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64) +declare <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64) +declare <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64) + +define <16 x i8> @test_mtvsrbm(i64 %a) { +; CHECK-LABEL: test_mtvsrbm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbm v2, r3 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 %a) + ret <16 x i8> %mv +} + +define <16 x i8> @test_mtvsrbmi() { +; CHECK-LABEL: test_mtvsrbmi: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbmi v2, 1 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 1) + ret <16 x i8> %mv +} + +define <16 x i8> @test_mtvsrbmi2() { +; CHECK-LABEL: test_mtvsrbmi2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbmi v2, 255 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 255) + ret <16 x i8> %mv +} + +define <16 x i8> @test_mtvsrbmi3() { +; CHECK-LABEL: test_mtvsrbmi3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbmi v2, 65535 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65535) + ret <16 x i8> %mv +} + +define <16 x i8> @test_mtvsrbmi4() { +; CHECK-LABEL: test_mtvsrbmi4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbmi v2, 0 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65536) + ret <16 x i8> %mv +} + +define <16 x i8> @test_mtvsrbmi5() { +; CHECK-LABEL: test_mtvsrbmi5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrbmi v2, 10 +; CHECK-NEXT: blr +entry: + %mv = tail call <16 x i8> @llvm.ppc.altivec.mtvsrbm(i64 65546) + ret <16 x i8> %mv +} + +define <8 x i16> @test_mtvsrhm(i64 %a) { +; CHECK-LABEL: test_mtvsrhm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrhm v2, r3 +; CHECK-NEXT: blr +entry: + %mv = tail call <8 x i16> @llvm.ppc.altivec.mtvsrhm(i64 %a) + ret <8 x i16> %mv +} + +define <4 x i32> @test_mtvsrwm(i64 %a) { +; 
CHECK-LABEL: test_mtvsrwm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrwm v2, r3 +; CHECK-NEXT: blr +entry: + %mv = tail call <4 x i32> @llvm.ppc.altivec.mtvsrwm(i64 %a) + ret <4 x i32> %mv +} + +define <2 x i64> @test_mtvsrdm(i64 %a) { +; CHECK-LABEL: test_mtvsrdm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrdm v2, r3 +; CHECK-NEXT: blr +entry: + %mv = tail call <2 x i64> @llvm.ppc.altivec.mtvsrdm(i64 %a) + ret <2 x i64> %mv +} + +define <1 x i128> @test_mtvsrqm(i64 %a) { +; CHECK-LABEL: test_mtvsrqm: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: mtvsrqm v2, r3 +; CHECK-NEXT: blr +entry: + %mv = tail call <1 x i128> @llvm.ppc.altivec.mtvsrqm(i64 %a) + ret <1 x i128> %mv +}