
Don't bother sign/zext_inreg'ing the result of an and operation if we know
the result doesn't change as a result of the extend.

This improves codegen for Alpha on this testcase:

int %a(ushort* %i) {
        %tmp.1 = load ushort* %i
        %tmp.2 = cast ushort %tmp.1 to int
        %tmp.4 = and int %tmp.2, 1
        ret int %tmp.4
}

Generating:

a:
        ldgp $29, 0($27)
        ldwu $0,0($16)
        and $0,1,$0
        ret $31,($26),1

instead of:

a:
        ldgp $29, 0($27)
        ldwu $0,0($16)
        and $0,1,$0
        addl $0,0,$0
        ret $31,($26),1
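
The only difference is the addl $0,0,$0: on Alpha, addl adds and sign-extends
the 32-bit result into the 64-bit register, i.e. a sign_extend_inreg from 32
bits. It is dead here because and $0,1,$0 already cleared every bit the
extension could change. A minimal standalone check of that underlying claim
(plain C++, not part of the commit):

#include <cassert>
#include <cstdint>

int main() {
  // After an 'and' with 1, only bit 0 can be set, so sign-extending the
  // low 32 bits of the register (what addl $0,0,$0 does) changes nothing.
  const uint64_t tests[] = {0, 1, 0xFFFF, 0xDEADBEEFCAFEULL};
  for (uint64_t x : tests) {
    uint64_t masked = x & 1;
    uint64_t extended = (uint64_t)(int64_t)(int32_t)masked; // addl-style extend
    assert(extended == masked);
  }
  return 0;
}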

btw, alpha really should switch to livein/outs for args :)

llvm-svn: 21213
Chris Lattner 2005-04-10 23:37:16 +00:00
parent c730ea00e2
commit 4f26677dc9

@@ -1119,6 +1119,25 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT, SDOperand N1,
      if (Opcode == ISD::SIGN_EXTEND_INREG) return N1;
      break;
    }
    // If we are sign extending the result of an (and X, C) operation, and we
    // know the extended bits are zeros already, don't do the extend.
    if (N1.getOpcode() == ISD::AND)
      if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
        uint64_t Mask = N1C->getValue();
        unsigned NumBits = MVT::getSizeInBits(EVT);
        if (Opcode == ISD::ZERO_EXTEND_INREG) {
          if ((Mask & (~0ULL << NumBits)) == 0)
            return N1;
          else
            return getNode(ISD::AND, VT, N1.getOperand(0),
                           getConstant(Mask & (~0ULL >> (64-NumBits)), VT));
        } else {
          assert(Opcode == ISD::SIGN_EXTEND_INREG);
          if ((Mask & (~0ULL << (NumBits-1))) == 0)
            return N1;
        }
      }
    break;
  }
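
For reference, here is a self-contained sketch of the two mask tests the patch
adds. The helper names are made up for illustration; this is not LLVM code, and
it assumes 0 < NumBits < 64, as holds when EVT is strictly narrower than a
64-bit VT:

#include <cassert>
#include <cstdint>

// Zero-extension in-reg from NumBits is a no-op when the AND mask keeps
// no bits at or above NumBits.
static bool zextInRegIsNoop(uint64_t Mask, unsigned NumBits) {
  return (Mask & (~0ULL << NumBits)) == 0;
}

// Sign-extension additionally needs the sign bit (NumBits-1) clear, since
// that bit would otherwise be smeared into the high bits.
static bool sextInRegIsNoop(uint64_t Mask, unsigned NumBits) {
  return (Mask & (~0ULL << (NumBits - 1))) == 0;
}

int main() {
  // The testcase above: and with 1, extending in-reg from 16 bits.
  assert(zextInRegIsNoop(1, 16) && sextInRegIsNoop(1, 16));
  // A mask keeping the 16-bit sign bit: zext is still free, sext is not.
  assert(zextInRegIsNoop(0x8000, 16) && !sextInRegIsNoop(0x8000, 16));
  return 0;
}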