
Add FP versions of the binary operators, keeping the int and fp worlds separate.

Though I have done extensive testing, it is possible that this will break
things in configs I can't test.  Please let me know if this causes a problem
and I'll fix it ASAP.

llvm-svn: 23505
Chris Lattner 2005-09-28 22:29:17 +00:00
parent 61f3785147
commit d3b3d07c41
4 changed files with 108 additions and 97 deletions
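What the commit message describes, as a minimal stand-alone sketch (the enums below are hypothetical, simplified stand-ins for LLVM's ISD node opcodes and the Alpha machine opcodes, not the real interfaces): once integer and FP operations carry distinct node opcodes, a backend's SelectExpr-style switch no longer needs an isFP flag to choose between, say, ADDQ and ADDT; the node opcode alone determines the machine opcode, which is exactly what the Alpha, IA64, and X86 hunks below do in the real selectors.

#include <cassert>
#include <iostream>

// Simplified stand-ins for LLVM's ISD node opcodes (illustrative only).
enum class ISD { ADD, SUB, MUL, FADD, FSUB, FMUL };

// Simplified stand-ins for Alpha machine opcodes: *Q = 64-bit integer op,
// *T = double-precision FP op (the same names appear in the hunks below).
enum class AlphaOp { ADDQ, SUBQ, MULQ, ADDT, SUBT, MULT };

// Before this commit: switch on ISD::ADD, then consult an isFP flag.
// After it: the node opcode alone picks the machine opcode.
AlphaOp select(ISD opcode) {
  switch (opcode) {
  case ISD::ADD:  return AlphaOp::ADDQ;
  case ISD::SUB:  return AlphaOp::SUBQ;
  case ISD::MUL:  return AlphaOp::MULQ;
  case ISD::FADD: return AlphaOp::ADDT;
  case ISD::FSUB: return AlphaOp::SUBT;
  case ISD::FMUL: return AlphaOp::MULT;
  }
  assert(false && "unknown node opcode");
  return AlphaOp::ADDQ; // unreachable
}

int main() {
  // Integer and FP adds now arrive as different node opcodes,
  // so no flag is needed to keep the two worlds separate.
  assert(select(ISD::ADD)  == AlphaOp::ADDQ);
  assert(select(ISD::FADD) == AlphaOp::ADDT);
  std::cout << "int and fp selection kept separate\n";
  return 0;
}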

View File

@@ -68,8 +68,8 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM)
setOperationAction(ISD::SEXTLOAD, MVT::i8, Expand);
setOperationAction(ISD::SEXTLOAD, MVT::i16, Expand);
setOperationAction(ISD::SREM, MVT::f32, Expand);
setOperationAction(ISD::SREM, MVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

View File

@@ -1275,10 +1275,7 @@ unsigned AlphaISel::SelectExpr(SDOperand N) {
case ISD::SHL: Opc = Alpha::SL; break;
case ISD::SRL: Opc = Alpha::SRL; break;
case ISD::SRA: Opc = Alpha::SRA; break;
case ISD::MUL:
Opc = isFP ? (DestType == MVT::f64 ? Alpha::MULT : Alpha::MULS)
: Alpha::MULQ;
break;
case ISD::MUL: Opc = Alpha::MULQ; break;
};
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
@@ -1288,25 +1285,7 @@ unsigned AlphaISel::SelectExpr(SDOperand N) {
case ISD::ADD:
case ISD::SUB:
if (isFP) {
ConstantFPSDNode *CN;
if (opcode == ISD::ADD)
Opc = DestType == MVT::f64 ? Alpha::ADDT : Alpha::ADDS;
else
Opc = DestType == MVT::f64 ? Alpha::SUBT : Alpha::SUBS;
if (opcode == ISD::SUB
&& (CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
&& (CN->isExactlyValue(+0.0) || CN->isExactlyValue(-0.0)))
{
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, Alpha::CPYSN, 2, Result).addReg(Tmp2).addReg(Tmp2);
} else {
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
}
return Result;
} else {
{
bool isAdd = opcode == ISD::ADD;
//first check for Scaled Adds and Subs!
@@ -1369,15 +1348,25 @@ unsigned AlphaISel::SelectExpr(SDOperand N) {
}
return Result;
}
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
case ISD::FDIV: {
if (opcode == ISD::FADD)
Opc = DestType == MVT::f64 ? Alpha::ADDT : Alpha::ADDS;
else if (opcode == ISD::FSUB)
Opc = DestType == MVT::f64 ? Alpha::SUBT : Alpha::SUBS;
else if (opcode == ISD::FMUL)
Opc = DestType == MVT::f64 ? Alpha::MULT : Alpha::MULS;
else
Opc = DestType == MVT::f64 ? Alpha::DIVT : Alpha::DIVS;
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
case ISD::SDIV:
if (isFP) {
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, DestType == MVT::f64 ? Alpha::DIVT : Alpha::DIVS, 2, Result)
.addReg(Tmp1).addReg(Tmp2);
return Result;
} else {
{
//check if we can convert into a shift!
if (isSIntImmediate(N.getOperand(1), SImm) &&
SImm != 0 && isPowerOf2_64(llabs(SImm))) {

View File

@@ -72,8 +72,8 @@ namespace {
setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);
setOperationAction(ISD::SREM , MVT::f32 , Expand);
setOperationAction(ISD::SREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::UREM , MVT::f32 , Expand);
setOperationAction(ISD::UREM , MVT::f64 , Expand);
@@ -1240,20 +1240,28 @@ unsigned ISel::SelectExpr(SDOperand N) {
BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(Tmp2);
return Result;
}
case ISD::ADD: {
if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
// into an fma, do so:
// ++FusedFP; // Statistic
case ISD::FADD: {
if (N.getOperand(0).getOpcode() == ISD::FMUL &&
N.getOperand(0).Val->hasOneUse()) { // if we can fold this add
// into an fma, do so:
// ++FusedFP; // Statistic
Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
Tmp3 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FMA, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
return Result; // early exit
}
//else, fallthrough:
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
if(DestType != MVT::f64 && N.getOperand(0).getOpcode() == ISD::SHL &&
case ISD::ADD: {
if (N.getOperand(0).getOpcode() == ISD::SHL &&
N.getOperand(0).Val->hasOneUse()) { // if we might be able to fold
// this add into a shladd, try:
ConstantSDNode *CSD = NULL;
@@ -1273,75 +1281,71 @@ unsigned ISel::SelectExpr(SDOperand N) {
//else, fallthrough:
Tmp1 = SelectExpr(N.getOperand(0));
if(DestType != MVT::f64) { // integer addition:
switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
case 1: // adding a constant that's 14 bits
BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
return Result; // early exit
} // fallthrough and emit a reg+reg ADD:
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
} else { // this is a floating point addition
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
}
switch (ponderIntegerAdditionWith(N.getOperand(1), Tmp3)) {
case 1: // adding a constant that's 14 bits
BuildMI(BB, IA64::ADDIMM14, 2, Result).addReg(Tmp1).addSImm(Tmp3);
return Result; // early exit
} // fallthrough and emit a reg+reg ADD:
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::ADD, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
case ISD::FMUL:
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
case ISD::MUL: {
if(DestType != MVT::f64) { // TODO: speed!
// TODO: speed!
/* FIXME if(N.getOperand(1).getOpcode() != ISD::Constant) { // if not a const mul
*/
// boring old integer multiply with xma
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
unsigned TempFR1=MakeReg(MVT::f64);
unsigned TempFR2=MakeReg(MVT::f64);
unsigned TempFR3=MakeReg(MVT::f64);
BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
.addReg(IA64::F0);
BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
return Result; // early exit
/* FIXME } else { // we are multiplying by an integer constant! yay
return Reg = SelectExpr(BuildConstmulSequence(N)); // avert your eyes!
} */
}
else { // floating point multiply
// boring old integer multiply with xma
Tmp1 = SelectExpr(N.getOperand(0));
Tmp2 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FMPY, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
unsigned TempFR1=MakeReg(MVT::f64);
unsigned TempFR2=MakeReg(MVT::f64);
unsigned TempFR3=MakeReg(MVT::f64);
BuildMI(BB, IA64::SETFSIG, 1, TempFR1).addReg(Tmp1);
BuildMI(BB, IA64::SETFSIG, 1, TempFR2).addReg(Tmp2);
BuildMI(BB, IA64::XMAL, 1, TempFR3).addReg(TempFR1).addReg(TempFR2)
.addReg(IA64::F0);
BuildMI(BB, IA64::GETFSIG, 1, Result).addReg(TempFR3);
return Result; // early exit
/* FIXME } else { // we are multiplying by an integer constant! yay
return Reg = SelectExpr(BuildConstmulSequence(N)); // avert your eyes!
} */
}
case ISD::SUB: {
if(DestType == MVT::f64 && N.getOperand(0).getOpcode() == ISD::MUL &&
case ISD::FSUB:
if(N.getOperand(0).getOpcode() == ISD::FMUL &&
N.getOperand(0).Val->hasOneUse()) { // if we can fold this sub
// into an fms, do so:
// ++FusedFP; // Statistic
// ++FusedFP; // Statistic
Tmp1 = SelectExpr(N.getOperand(0).getOperand(0));
Tmp2 = SelectExpr(N.getOperand(0).getOperand(1));
Tmp3 = SelectExpr(N.getOperand(1));
BuildMI(BB, IA64::FMS, 3, Result).addReg(Tmp1).addReg(Tmp2).addReg(Tmp3);
return Result; // early exit
}
Tmp2 = SelectExpr(N.getOperand(1));
if(DestType != MVT::f64) { // integer subtraction:
switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
case 1: // subtracting *from* an 8 bit constant:
BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
return Result; // early exit
} // fallthrough and emit a reg+reg SUB:
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
} else { // this is a floating point subtraction
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
}
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, IA64::FSUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
case ISD::SUB: {
Tmp2 = SelectExpr(N.getOperand(1));
switch (ponderIntegerSubtractionFrom(N.getOperand(0), Tmp3)) {
case 1: // subtracting *from* an 8 bit constant:
BuildMI(BB, IA64::SUBIMM8, 2, Result).addSImm(Tmp3).addReg(Tmp2);
return Result; // early exit
} // fallthrough and emit a reg+reg SUB:
Tmp1 = SelectExpr(N.getOperand(0));
BuildMI(BB, IA64::SUB, 2, Result).addReg(Tmp1).addReg(Tmp2);
return Result;
}
@@ -1584,6 +1588,7 @@ pC = pA OR pB
return Result;
}
case ISD::FDIV:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
@@ -1601,8 +1606,10 @@ pC = pA OR pB
bool isSigned=false;
switch(N.getOpcode()) {
case ISD::FDIV:
case ISD::SDIV: isModulus=false; isSigned=true; break;
case ISD::UDIV: isModulus=false; isSigned=false; break;
case ISD::FREM:
case ISD::SREM: isModulus=true; isSigned=true; break;
case ISD::UREM: isModulus=true; isSigned=false; break;
}

View File

@@ -158,7 +158,7 @@ namespace {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
setOperationAction(ISD::SREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
@@ -205,12 +205,12 @@ namespace {
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FABS , MVT::f64, Expand);
setOperationAction(ISD::FNEG , MVT::f64, Expand);
setOperationAction(ISD::SREM , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FABS , MVT::f32, Expand);
setOperationAction(ISD::FNEG , MVT::f32, Expand);
setOperationAction(ISD::SREM , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
addLegalFPImmediate(+0.0); // xorps / xorpd
} else {
@@ -2513,6 +2513,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
}
return Result;
case ISD::FADD:
case ISD::ADD:
Op0 = N.getOperand(0);
Op1 = N.getOperand(1);
@@ -2703,6 +2704,8 @@ unsigned ISel::SelectExpr(SDOperand N) {
return Result;
}
case ISD::FSUB:
case ISD::FMUL:
case ISD::SUB:
case ISD::MUL:
case ISD::AND:
@@ -2810,7 +2813,9 @@ unsigned ISel::SelectExpr(SDOperand N) {
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
@@ -2824,7 +2829,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
}
if (isFoldableLoad(Op0, Op1, true))
if (Node->getOpcode() != ISD::SUB) {
if (Node->getOpcode() != ISD::SUB && Node->getOpcode() != ISD::FSUB) {
std::swap(Op0, Op1);
goto FoldOps;
} else {
@@ -2860,7 +2865,9 @@ unsigned ISel::SelectExpr(SDOperand N) {
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
@@ -2902,7 +2909,9 @@ unsigned ISel::SelectExpr(SDOperand N) {
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
@@ -3006,6 +3015,8 @@ unsigned ISel::SelectExpr(SDOperand N) {
N.getValueType(), Result);
return Result;
case ISD::FDIV:
case ISD::FREM:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
@@ -3013,7 +3024,7 @@ unsigned ISel::SelectExpr(SDOperand N) {
assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
"We don't support this operator!");
if (N.getOpcode() == ISD::SDIV) {
if (N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::FDIV) {
// We can fold loads into FpDIVs, but not really into any others.
if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
// Check for reversed and unreversed DIV.
@@ -3756,9 +3767,12 @@ bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
default:
std::cerr << "CANNOT [mem] op= val: ";
StVal.Val->dump(); std::cerr << "\n";
case ISD::FMUL:
case ISD::MUL:
case ISD::FDIV:
case ISD::SDIV:
case ISD::UDIV:
case ISD::FREM:
case ISD::SREM:
case ISD::UREM: return false;
@@ -3837,7 +3851,8 @@ bool ISel::TryToFoldLoadOpStore(SDNode *Node) {
// If we have [mem] = V op [mem], try to turn it into:
// [mem] = [mem] op V.
if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
if (Op1 == TheLoad &&
StVal.getOpcode() != ISD::SUB && StVal.getOpcode() != ISD::FSUB &&
StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
StVal.getOpcode() != ISD::SRL)
std::swap(Op0, Op1);