
And now support for MMX logical operations.

llvm-svn: 35125
Bill Wendling 2007-03-16 09:44:46 +00:00
parent c3e7d4b884
commit 8ced23ee5a
3 changed files with 143 additions and 61 deletions
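For context, the sketch below is not part of the diff; it is hand-written IR (function and value names invented for illustration) showing the kind of code that this change lets the X86 backend select to the MMX pand, por, and pxor instructions, assuming MMX code generation is enabled (e.g. -mattr=+mmx on llc of this era):

define void @logic(<2 x i32>* %A, <2 x i32>* %B) {
entry:
  %a = load <2 x i32>* %A
  %b = load <2 x i32>* %B
  %t0 = and <2 x i32> %a, %b                 ; selects to pand
  %t1 = or <2 x i32> %t0, %b                 ; selects to por
  %t2 = xor <2 x i32> %t1, %a                ; selects to pxor
  store <2 x i32> %t2, <2 x i32>* %A
  tail call void @llvm.x86.mmx.emms( )
  ret void
}
declare void @llvm.x86.mmx.emms()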


@@ -328,6 +328,24 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
setOperationAction(ISD::MUL, MVT::v4i16, Legal);
setOperationAction(ISD::AND, MVT::v8i8, Promote);
AddPromotedToType (ISD::AND, MVT::v8i8, MVT::v2i32);
setOperationAction(ISD::AND, MVT::v4i16, Promote);
AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::AND, MVT::v2i32, Legal);
setOperationAction(ISD::OR, MVT::v8i8, Promote);
AddPromotedToType (ISD::OR, MVT::v8i8, MVT::v2i32);
setOperationAction(ISD::OR, MVT::v4i16, Promote);
AddPromotedToType (ISD::OR, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::OR, MVT::v2i32, Legal);
setOperationAction(ISD::XOR, MVT::v8i8, Promote);
AddPromotedToType (ISD::XOR, MVT::v8i8, MVT::v2i32);
setOperationAction(ISD::XOR, MVT::v4i16, Promote);
AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v2i32);
setOperationAction(ISD::XOR, MVT::v2i32, Legal);
setOperationAction(ISD::LOAD, MVT::v8i8, Promote);
AddPromotedToType (ISD::LOAD, MVT::v8i8, MVT::v2i32);
setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
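The hunk above uses the usual promote-to-one-legal-type trick: AND, OR, and XOR on v8i8 and v4i16 are marked Promote with v2i32 as the promoted type, the v2i32 forms are Legal, and LOAD is promoted the same way so the operands land in the right type. One pand/por/pxor pattern therefore covers every MMX element width, since bitwise operations do not care about element boundaries. The hand-written sketch below (not compiler output, names invented) shows the v2i32 equivalence the legalizer relies on for an <8 x i8> and:

define void @and_v8i8(<8 x i8>* %A, <8 x i8>* %B) {
entry:
  %a = load <8 x i8>* %A
  %b = load <8 x i8>* %B
  %x = bitcast <8 x i8> %a to <2 x i32>
  %y = bitcast <8 x i8> %b to <2 x i32>
  %t = and <2 x i32> %x, %y                  ; one pand handles every element width
  %r = bitcast <2 x i32> %t to <8 x i8>      ; same bits as an and on <8 x i8>
  store <8 x i8> %r, <8 x i8>* %A
  ret void
}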


@@ -63,9 +63,7 @@ let isTwoAddress = 1 in {
(bitconvert
(loadv2i32 addr:$src2)))))]>;
}
}
let isTwoAddress = 1 in {
multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
bit Commutable = 0> {
def rr : MMXI<opc, MRMSrcReg, (ops VR64:$dst, VR64:$src1, VR64:$src2),
@@ -78,6 +76,24 @@ let isTwoAddress = 1 in {
[(set VR64:$dst, (IntId VR64:$src1,
(bitconvert (loadv2i32 addr:$src2))))]>;
}
// MMXI_binop_rm_v2i32 - Simple MMX binary operator whose type is v2i32.
//
// FIXME: we could eliminate this and use MMXI_binop_rm instead if tblgen knew
// to collapse (bitconvert VT to VT) into its operand.
//
multiclass MMXI_binop_rm_v2i32<bits<8> opc, string OpcodeStr, SDNode OpNode,
bit Commutable = 0> {
def rr : MMXI<opc, MRMSrcReg, (ops VR64:$dst, VR64:$src1, VR64:$src2),
!strconcat(OpcodeStr, " {$src2, $dst|$dst, $src2}"),
[(set VR64:$dst, (v2i32 (OpNode VR64:$src1, VR64:$src2)))]> {
let isCommutable = Commutable;
}
def rm : MMXI<opc, MRMSrcMem, (ops VR64:$dst, VR64:$src1, i64mem:$src2),
!strconcat(OpcodeStr, " {$src2, $dst|$dst, $src2}"),
[(set VR64:$dst,
(OpNode VR64:$src1,(loadv2i32 addr:$src2)))]>;
}
}
//===----------------------------------------------------------------------===//
@@ -116,6 +132,24 @@ defm MMX_PMULLW : MMXI_binop_rm<0xD5, "pmullw", mul, v4i16, 1>;
defm MMX_PMULHW : MMXI_binop_rm_int<0xE5, "pmulhw" , int_x86_mmx_pmulh_w , 1>;
defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;
// Logical Instructions
defm MMX_PAND : MMXI_binop_rm_v2i32<0xDB, "pand", and, 1>;
defm MMX_POR : MMXI_binop_rm_v2i32<0xEB, "por" , or, 1>;
defm MMX_PXOR : MMXI_binop_rm_v2i32<0xEF, "pxor", xor, 1>;
let isTwoAddress = 1 in {
def MMX_PANDNrr : MMXI<0xDF, MRMSrcReg,
(ops VR64:$dst, VR64:$src1, VR64:$src2),
"pandn {$src2, $dst|$dst, $src2}",
[(set VR64:$dst, (v2i32 (and (vnot VR64:$src1),
VR64:$src2)))]>;
def MMX_PANDNrm : MMXI<0xDF, MRMSrcMem,
(ops VR64:$dst, VR64:$src1, i64mem:$src2),
"pandn {$src2, $dst|$dst, $src2}",
[(set VR64:$dst, (v2i32 (and (vnot VR64:$src1),
(load addr:$src2))))]>;
}
// Move Instructions
def MOVD64rr : MMXI<0x6E, MRMSrcReg, (ops VR64:$dst, GR32:$src),
"movd {$src, $dst|$dst, $src}", []>;

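Two things happen in the TableGen hunks above: MMXI_binop_rm_v2i32 stamps out a register-register (rr) and a register-memory (rm) form for pand, por, and pxor, with the rm form folding a v2i32 load into the instruction's memory operand, while pandn is written out by hand because its pattern, (and (vnot $src1), $src2), inverts only the first operand and so cannot reuse the commutable binop multiclass. In IR terms the pandn pattern is an xor with all-ones followed by an and; a hand-written sketch (names invented, not from the commit):

define void @andn_v2i32(<2 x i32>* %A, <2 x i32>* %B) {
entry:
  %x = load <2 x i32>* %A
  %y = load <2 x i32>* %B
  %n = xor <2 x i32> %x, < i32 -1, i32 -1 >  ; vnot: invert the first operand
  %r = and <2 x i32> %n, %y                  ; together these match the MMX_PANDN pattern
  store <2 x i32> %r, <2 x i32>* %A
  ret void
}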

@@ -4,28 +4,37 @@
define void @foo(<8 x i8>* %A, <8 x i8>* %B) {
entry:
%tmp5 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp1 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp3 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp4 = add <8 x i8> %tmp1, %tmp3 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp4, <8 x i8>* %A
%tmp7 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp8 = add <8 x i8> %tmp5, %tmp7 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp8, <8 x i8>* %A
%tmp14 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp25 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp14, <8 x i8> %tmp8 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp25, <8 x i8>* %B
%tmp36 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp49 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp36, <8 x i8> %tmp25 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp49, <8 x i8>* %B
%tmp58 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp61 = sub <8 x i8> %tmp58, %tmp49 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp61, <8 x i8>* %B
%tmp64 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp80 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp61, <8 x i8> %tmp64 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp80, <8 x i8>* %A
%tmp89 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp105 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp80, <8 x i8> %tmp89 ) ; <<8 x i8>> [#uses=1]
store <8 x i8> %tmp105, <8 x i8>* %A
%tmp13 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
%tmp16 = mul <8 x i8> %tmp13, %tmp105 ; <<8 x i8>> [#uses=1]
store <8 x i8> %tmp16, <8 x i8>* %B
%tmp12 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp4, <8 x i8> %tmp7 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp12, <8 x i8>* %A
%tmp16 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp21 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp12, <8 x i8> %tmp16 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp21, <8 x i8>* %A
%tmp27 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp28 = sub <8 x i8> %tmp21, %tmp27 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp28, <8 x i8>* %A
%tmp31 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp36 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp28, <8 x i8> %tmp31 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp36, <8 x i8>* %A
%tmp40 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp45 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp36, <8 x i8> %tmp40 ) ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp45, <8 x i8>* %A
%tmp51 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp52 = mul <8 x i8> %tmp45, %tmp51 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp52, <8 x i8>* %A
%tmp57 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp58 = and <8 x i8> %tmp52, %tmp57 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp58, <8 x i8>* %A
%tmp63 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp64 = or <8 x i8> %tmp58, %tmp63 ; <<8 x i8>> [#uses=2]
store <8 x i8> %tmp64, <8 x i8>* %A
%tmp69 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
%tmp70 = xor <8 x i8> %tmp64, %tmp69 ; <<8 x i8>> [#uses=1]
store <8 x i8> %tmp70, <8 x i8>* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
@@ -37,55 +46,68 @@ entry:
%tmp4 = add <2 x i32> %tmp1, %tmp3 ; <<2 x i32>> [#uses=2]
store <2 x i32> %tmp4, <2 x i32>* %A
%tmp9 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
%tmp10 = sub <2 x i32> %tmp4, %tmp9 ; <<2 x i32>> [#uses=1]
store <2 x i32> %tmp10, <2 x i32>* %B
%tmp13 = load <2 x i32>* %A ; <<2 x i32>> [#uses=1]
%tmp16 = mul <2 x i32> %tmp13, %tmp10 ; <<2 x i32>> [#uses=1]
store <2 x i32> %tmp16, <2 x i32>* %B
%tmp10 = sub <2 x i32> %tmp4, %tmp9 ; <<2 x i32>> [#uses=2]
store <2 x i32> %tmp10, <2 x i32>* %A
%tmp15 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
%tmp16 = mul <2 x i32> %tmp10, %tmp15 ; <<2 x i32>> [#uses=2]
store <2 x i32> %tmp16, <2 x i32>* %A
%tmp21 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
%tmp22 = and <2 x i32> %tmp16, %tmp21 ; <<2 x i32>> [#uses=2]
store <2 x i32> %tmp22, <2 x i32>* %A
%tmp27 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
%tmp28 = or <2 x i32> %tmp22, %tmp27 ; <<2 x i32>> [#uses=2]
store <2 x i32> %tmp28, <2 x i32>* %A
%tmp33 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
%tmp34 = xor <2 x i32> %tmp28, %tmp33 ; <<2 x i32>> [#uses=1]
store <2 x i32> %tmp34, <2 x i32>* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
define void @bar(<4 x i16>* %A, <4 x i16>* %B) {
entry:
%tmp5 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp1 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp3 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp4 = add <4 x i16> %tmp1, %tmp3 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp4, <4 x i16>* %A
%tmp7 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp8 = add <4 x i16> %tmp5, %tmp7 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp8, <4 x i16>* %A
%tmp14 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp25 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp14, <4 x i16> %tmp8 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp25, <4 x i16>* %B
%tmp36 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp49 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp36, <4 x i16> %tmp25 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp49, <4 x i16>* %B
%tmp58 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp61 = sub <4 x i16> %tmp58, %tmp49 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp61, <4 x i16>* %B
%tmp64 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp80 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp61, <4 x i16> %tmp64 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp80, <4 x i16>* %A
%tmp89 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp105 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp80, <4 x i16> %tmp89 ) ; <<4 x i16>> [#uses=1]
store <4 x i16> %tmp105, <4 x i16>* %A
%tmp22 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
%tmp24 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp22, <4 x i16> %tmp105 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp24, <4 x i16>* %A
%tmp28 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp33 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp24, <4 x i16> %tmp28 ) ; <<2 x i32>> [#uses=1]
%tmp34 = bitcast <2 x i32> %tmp33 to <4 x i16> ; <<4 x i16>> [#uses=1]
store <4 x i16> %tmp34, <4 x i16>* %A
%tmp12 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp4, <4 x i16> %tmp7 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp12, <4 x i16>* %A
%tmp16 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp21 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp12, <4 x i16> %tmp16 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp21, <4 x i16>* %A
%tmp27 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp28 = sub <4 x i16> %tmp21, %tmp27 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp28, <4 x i16>* %A
%tmp31 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp36 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp28, <4 x i16> %tmp31 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp36, <4 x i16>* %A
%tmp40 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp45 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp36, <4 x i16> %tmp40 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp45, <4 x i16>* %A
%tmp51 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp52 = mul <4 x i16> %tmp45, %tmp51 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp52, <4 x i16>* %A
%tmp55 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp60 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp52, <4 x i16> %tmp55 ) ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp60, <4 x i16>* %A
%tmp64 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp69 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp60, <4 x i16> %tmp64 ) ; <<2 x i32>> [#uses=1]
%tmp70 = bitcast <2 x i32> %tmp69 to <4 x i16> ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp70, <4 x i16>* %A
%tmp75 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp76 = and <4 x i16> %tmp70, %tmp75 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp76, <4 x i16>* %A
%tmp81 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp82 = or <4 x i16> %tmp76, %tmp81 ; <<4 x i16>> [#uses=2]
store <4 x i16> %tmp82, <4 x i16>* %A
%tmp87 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
%tmp88 = xor <4 x i16> %tmp82, %tmp87 ; <<4 x i16>> [#uses=1]
store <4 x i16> %tmp88, <4 x i16>* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>)
@@ -94,6 +116,14 @@ declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>)
declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)
declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
declare <4 x i16> @llvm.x86.mmx.pmulh.w(<4 x i16>, <4 x i16>)
declare <2 x i32> @llvm.x86.mmx.pmadd.wd(<4 x i16>, <4 x i16>)