diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index dadce7a09a2..acd6e23a10c 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1378,7 +1378,6 @@ static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
 // allows us to fold the shift into this addressing mode. Returns false if the
 // transform succeeded.
 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
-                                        uint64_t Mask,
                                         SDValue Shift, SDValue X,
                                         X86ISelAddressMode &AM) {
   if (Shift.getOpcode() != ISD::SHL ||
@@ -1396,6 +1395,11 @@ static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
   if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
     return true;
 
+  // Use a signed mask so that shifting right will insert sign bits. These
+  // bits will be removed when we shift the result left so it doesn't matter
+  // what we use. This might allow a smaller immediate encoding.
+  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
+
   MVT VT = N.getSimpleValueType();
   SDLoc DL(N);
   SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
@@ -1863,7 +1867,7 @@ bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
 
     // Try to swap the mask and shift to place shifts which can be done as
     // a scale on the outside of the mask.
-    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
+    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Shift, X, AM))
      return false;
 
     // Try to fold the mask and shift into BEXTR and scale.
diff --git a/test/CodeGen/X86/fold-and-shift-x86_64.ll b/test/CodeGen/X86/fold-and-shift-x86_64.ll
index 8aa4b6825b0..ead180edc97 100644
--- a/test/CodeGen/X86/fold-and-shift-x86_64.ll
+++ b/test/CodeGen/X86/fold-and-shift-x86_64.ll
@@ -4,9 +4,8 @@
 define i8 @t1(i8* %X, i64 %i) {
 ; CHECK-LABEL: t1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movabsq $4611686018427387649, %rax # imm = 0x3FFFFFFFFFFFFF01
-; CHECK-NEXT:    andq %rsi, %rax
-; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    andq $-255, %rsi
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
 ; CHECK-NEXT:    retq
 entry:
   %tmp2 = shl i64 %i, 2
@@ -20,9 +19,8 @@ entry:
 define i8 @t2(i8* %X, i64 %i) {
 ; CHECK-LABEL: t2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movabsq $4611686018427387890, %rax # imm = 0x3FFFFFFFFFFFFFF2
-; CHECK-NEXT:    andq %rsi, %rax
-; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    andq $-14, %rsi
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
 ; CHECK-NEXT:    retq
 entry:
   %tmp2 = shl i64 %i, 2
diff --git a/test/CodeGen/X86/fold-and-shift.ll b/test/CodeGen/X86/fold-and-shift.ll
index 93b8bbd3080..39c91d90017 100644
--- a/test/CodeGen/X86/fold-and-shift.ll
+++ b/test/CodeGen/X86/fold-and-shift.ll
@@ -95,7 +95,7 @@ define i8 @t5(i8* %X, i32 %i) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    andl $1073741810, %ecx # imm = 0x3FFFFFF2
+; CHECK-NEXT:    andl $-14, %ecx
 ; CHECK-NEXT:    movb (%eax,%ecx,4), %al
 ; CHECK-NEXT:    retl
 entry:
@@ -112,7 +112,7 @@ define i8 @t6(i8* %X, i32 %i) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    andl $1073741569, %ecx # imm = 0x3FFFFF01
+; CHECK-NEXT:    andl $-255, %ecx
 ; CHECK-NEXT:    movb (%eax,%ecx,4), %al
 ; CHECK-NEXT:    retl
 entry: