
Fold some and + shift in x86 addressing mode.

llvm-svn: 44970
Evan Cheng 2007-12-13 00:43:27 +00:00
parent c314c2daf1
commit 343929c773
3 changed files with 69 additions and 6 deletions
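
The fold rewrites an address index of the form (x << C1) & C2 as (x & (C2 >> C1)) << C1; the remaining shift is then absorbed into the scale of the x86 addressing mode, leaving only a smaller mask on the index register. A minimal sketch of the underlying identity (illustrative C++, not part of the commit):

#include <cassert>
#include <cstdint>

int main() {
  // The low C1 bits of (x << C1) are already zero, so masking with C2 >> C1
  // before the shift gives the same value as masking with C2 after it.
  const uint32_t C1 = 2;     // shift amount; becomes the addressing-mode scale (1 << 2 == 4)
  const uint32_t C2 = 1020;  // original mask; 1020 >> 2 == 255 is the reduced mask
  for (uint32_t x = 0; x < (1u << 20); ++x)
    assert(((x << C1) & C2) == ((x & (C2 >> C1)) << C1));
  return 0;
}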

View File

@@ -582,7 +582,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 }
 int id = N.Val->getNodeId();
-bool Available = isSelected(id);
+bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.
 switch (N.getOpcode()) {
 default: break;
@@ -605,7 +605,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 // If value is available in a register both base and index components have
 // been picked, we can't fit the result available in the register in the
 // addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
-if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
+if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
 bool isStatic = TM.getRelocationModel() == Reloc::Static;
 SDOperand N0 = N.getOperand(0);
 // Mac OS X X86-64 lower 4G address is not available.
@@ -653,7 +653,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 break;
 case ISD::SHL:
-if (Available || AM.IndexReg.Val != 0 || AM.Scale != 1)
+if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1)
 break;
 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
@@ -690,7 +690,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 // FALL THROUGH
 case ISD::MUL:
 // X*[3,5,9] -> X+X*[2,4,8]
-if (!Available &&
+if (!AlreadySelected &&
 AM.BaseType == X86ISelAddressMode::RegBase &&
 AM.Base.Reg.Val == 0 &&
 AM.IndexReg.Val == 0) {
@@ -725,7 +725,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 break;
 case ISD::ADD:
-if (!Available) {
+if (!AlreadySelected) {
 X86ISelAddressMode Backup = AM;
 if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
 !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
@@ -740,7 +740,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 case ISD::OR:
 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
-if (Available) break;
+if (AlreadySelected) break;
 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
 X86ISelAddressMode Backup = AM;
@@ -758,6 +758,44 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
 AM = Backup;
 }
 break;
+case ISD::AND: {
+// Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
+// allows us to fold the shift into this addressing mode.
+if (AlreadySelected) break;
+SDOperand Shift = N.getOperand(0);
+if (Shift.getOpcode() != ISD::SHL) break;
+// Scale must not be used already.
+if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;
+ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+if (!C1 || !C2) break;
+// Not likely to be profitable if either the AND or SHIFT node has more
+// than one use (unless all uses are for address computation). Besides,
+// isel mechanism requires their node ids to be reused.
+if (!N.hasOneUse() || !Shift.hasOneUse())
+break;
+// Verify that the shift amount is something we can fold.
+unsigned ShiftCst = C1->getValue();
+if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
+break;
+// Get the new AND mask, this folds to a constant.
+SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
+SDOperand(C2, 0), SDOperand(C1, 0));
+SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
+Shift.getOperand(0), NewANDMask);
+NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
+NewAND.Val->setNodeId(N.Val->getNodeId());
+AM.Scale = 1 << ShiftCst;
+AM.IndexReg = NewAND;
+return false;
+}
 }
 return MatchAddressBase(N, AM, isRoot, Depth);
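
The new ISD::AND case above only fires under a few guards: both the and and the shl have a single use, the addressing mode's scaled index is still unused, and the shift amount is 1, 2, or 3 (scales 2, 4, 8). A rough standalone paraphrase of those checks, using hypothetical names (the real code works on SelectionDAG nodes and writes the result into X86ISelAddressMode):

#include <cstdint>
#include <optional>

struct FoldedIndex {
  uint64_t NewMask;  // mask applied before the shift: C2 >> C1
  unsigned Scale;    // addressing-mode scale: 1 << C1, i.e. 2, 4, or 8
};

// Hypothetical mirror of the legality checks in the ISD::AND case.
std::optional<FoldedIndex> foldAndOfShift(uint64_t C2, unsigned C1,
                                          bool NodesHaveOneUse,
                                          bool ScaleAlreadyUsed) {
  if (!NodesHaveOneUse)   // mirrors !N.hasOneUse() || !Shift.hasOneUse()
    return std::nullopt;
  if (ScaleAlreadyUsed)   // mirrors AM.IndexReg.Val != 0 || AM.Scale != 1
    return std::nullopt;
  if (C1 < 1 || C1 > 3)   // mirrors ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3
    return std::nullopt;
  return FoldedIndex{C2 >> C1, 1u << C1};
}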

View File

@@ -2613,6 +2613,10 @@ def : Pat<(i16 (anyext (loadi8 addr:$src))), (MOVZX16rm8 addr:$src)>;
 def : Pat<(i32 (anyext (loadi8 addr:$src))), (MOVZX32rm8 addr:$src)>;
 def : Pat<(i32 (anyext (loadi16 addr:$src))), (MOVZX32rm16 addr:$src)>;
+// (and (i32 load), 255) -> (zextload i8)
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 255))), (MOVZX32rm8 addr:$src)>;
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 65535))),(MOVZX32rm16 addr:$src)>;
 //===----------------------------------------------------------------------===//
 // Some peepholes
 //===----------------------------------------------------------------------===//
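
The two patterns added above cover the case where the masked value is itself a 32-bit load: on a little-endian target such as x86, keeping only the low 8 or 16 bits of a loaded word reads the same bytes as a narrower zero-extending load from the same address, which is what MOVZX32rm8 and MOVZX32rm16 provide. A small host-side illustration (assumes a little-endian host; illustrative only):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t word = 0x12345678;
  uint8_t low8;
  uint16_t low16;
  std::memcpy(&low8, &word, 1);   // lowest-addressed byte holds the low 8 bits
  std::memcpy(&low16, &word, 2);  // lowest-addressed halfword holds the low 16 bits
  assert((word & 255u) == low8);
  assert((word & 65535u) == low16);
  return 0;
}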

View File

@@ -0,0 +1,21 @@
+; RUN: llvm-as < %s | llc -march=x86 | not grep and
+define i32 @t1(i8* %X, i32 %i) {
+entry:
+%tmp2 = shl i32 %i, 2 ; <i32> [#uses=1]
+%tmp4 = and i32 %tmp2, 1020 ; <i32> [#uses=1]
+%tmp7 = getelementptr i8* %X, i32 %tmp4 ; <i8*> [#uses=1]
+%tmp78 = bitcast i8* %tmp7 to i32* ; <i32*> [#uses=1]
+%tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
+ret i32 %tmp9
+}
+define i32 @t2(i16* %X, i32 %i) {
+entry:
+%tmp2 = shl i32 %i, 1 ; <i32> [#uses=1]
+%tmp4 = and i32 %tmp2, 131070 ; <i32> [#uses=1]
+%tmp7 = getelementptr i16* %X, i32 %tmp4 ; <i16*> [#uses=1]
+%tmp78 = bitcast i16* %tmp7 to i32* ; <i32*> [#uses=1]
+%tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
+ret i32 %tmp9
+}
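
The new test covers both flavors of the fold: in @t1 the index (i << 2) & 1020 can be rewritten as (i & 255) << 2, and in @t2 (i << 1) & 131070 as (i & 65535) << 1, so the shifts fold into the addressing mode and the reduced masks can typically be selected as zero-extending moves rather than an explicit and, which is what the RUN line's "not grep and" asserts. Roughly equivalent C++ for the two functions (an assumption about the original source; the test runs the .ll above directly, and the casts ignore aliasing/overflow pedantry):

#include <cstdint>

int32_t t1(char *X, int32_t i) {
  // (i << 2) & 1020: same byte offset as indexing with (i & 255) scaled by 4
  return *reinterpret_cast<int32_t *>(&X[(i << 2) & 1020]);
}

int32_t t2(int16_t *X, int32_t i) {
  // (i << 1) & 131070: i16 element index equal to (i & 65535) << 1
  return *reinterpret_cast<int32_t *>(&X[(i << 1) & 131070]);
}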