
Support pattern matching vsldoi(x,y) and vsldoi(x,x), which allows the front-end to lower it and LLVM to have one fewer intrinsic. This implements CodeGen/PowerPC/vec_shuffle.ll.

llvm-svn: 27450
Chris Lattner 2006-04-06 18:26:28 +00:00
parent e547562ea3
commit 2875bb116e
4 changed files with 97 additions and 41 deletions
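For context on what the new shuffle-mask predicates look for: vsldoi vD,vA,vB,SH takes the 32-byte concatenation vA||vB (big-endian element order) and keeps the 16 bytes starting at offset SH, so a vector_shuffle whose mask is the consecutive run SH, SH+1, ..., SH+15 describes the same operation. A minimal standalone sketch of that correspondence (plain C++; the names vsldoi and vsldoiMask are illustrative, not LLVM code):

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Reference model of vsldoi vD,vA,vB,SH: result byte i is byte (i + SH)
// of the 32-byte concatenation A||B, in big-endian element order.
std::array<uint8_t, 16> vsldoi(const std::array<uint8_t, 16> &A,
                               const std::array<uint8_t, 16> &B, unsigned SH) {
  std::array<uint8_t, 16> R;
  for (unsigned i = 0; i != 16; ++i) {
    unsigned Idx = i + SH;                     // index into A||B
    R[i] = Idx < 16 ? A[Idx] : B[Idx - 16];
  }
  return R;
}

// The equivalent shuffle mask: element i selects byte SH+i of A||B.
std::array<int, 16> vsldoiMask(unsigned SH) {
  std::array<int, 16> M;
  for (unsigned i = 0; i != 16; ++i)
    M[i] = SH + i;
  return M;
}

int main() {
  std::array<uint8_t, 16> A, B;
  for (unsigned i = 0; i != 16; ++i) { A[i] = i; B[i] = 16 + i; }

  auto R = vsldoi(A, B, 3);                    // bytes 3..18 of A||B
  for (uint8_t v : R) std::printf("%u ", unsigned(v));   // 3 4 5 ... 18
  std::printf("\n");

  auto M = vsldoiMask(3);                      // the mask the predicate accepts
  for (int Idx : M) std::printf("%d ", Idx);   // 3 4 5 ... 18
  std::printf("\n");
  return 0;
}
```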


@@ -277,46 +277,73 @@ static bool isConstantOrUndef(SDOperand Op, unsigned Val) {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool PPC::isVPKUHUMShuffleMask(SDNode *N) {
return isConstantOrUndef(N->getOperand( 0), 1) &&
isConstantOrUndef(N->getOperand( 1), 3) &&
isConstantOrUndef(N->getOperand( 2), 5) &&
isConstantOrUndef(N->getOperand( 3), 7) &&
isConstantOrUndef(N->getOperand( 4), 9) &&
isConstantOrUndef(N->getOperand( 5), 11) &&
isConstantOrUndef(N->getOperand( 6), 13) &&
isConstantOrUndef(N->getOperand( 7), 15) &&
isConstantOrUndef(N->getOperand( 8), 17) &&
isConstantOrUndef(N->getOperand( 9), 19) &&
isConstantOrUndef(N->getOperand(10), 21) &&
isConstantOrUndef(N->getOperand(11), 23) &&
isConstantOrUndef(N->getOperand(12), 25) &&
isConstantOrUndef(N->getOperand(13), 27) &&
isConstantOrUndef(N->getOperand(14), 29) &&
isConstantOrUndef(N->getOperand(15), 31);
for (unsigned i = 0; i != 16; ++i)
if (!isConstantOrUndef(N->getOperand(i), i*2+1))
return false;
return true;
}
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool PPC::isVPKUWUMShuffleMask(SDNode *N) {
return isConstantOrUndef(N->getOperand( 0), 2) &&
isConstantOrUndef(N->getOperand( 1), 3) &&
isConstantOrUndef(N->getOperand( 2), 6) &&
isConstantOrUndef(N->getOperand( 3), 7) &&
isConstantOrUndef(N->getOperand( 4), 10) &&
isConstantOrUndef(N->getOperand( 5), 11) &&
isConstantOrUndef(N->getOperand( 6), 14) &&
isConstantOrUndef(N->getOperand( 7), 15) &&
isConstantOrUndef(N->getOperand( 8), 18) &&
isConstantOrUndef(N->getOperand( 9), 19) &&
isConstantOrUndef(N->getOperand(10), 22) &&
isConstantOrUndef(N->getOperand(11), 23) &&
isConstantOrUndef(N->getOperand(12), 26) &&
isConstantOrUndef(N->getOperand(13), 27) &&
isConstantOrUndef(N->getOperand(14), 30) &&
isConstantOrUndef(N->getOperand(15), 31);
for (unsigned i = 0; i != 16; i += 2)
if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
!isConstantOrUndef(N->getOperand(i+1), i*2+3))
return false;
return true;
}
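A note on the expected indices in the two pack predicates above: vpkuhum truncates each of the 16 halfwords of vA||vB to its low-order byte, which in big-endian layout is byte 2h+1 of halfword h, giving the mask 1,3,5,...,31; vpkuwum keeps the low-order halfword of each word, i.e. bytes 4w+2 and 4w+3, giving 2,3,6,7,...,30,31. A small sketch that prints exactly the masks the loops above accept (illustrative C++, not part of the patch):

```cpp
#include <cstdio>

int main() {
  // VPKUHUM: result byte i takes byte i*2+1 of vA||vB
  // (the low-order byte of halfword i in big-endian layout).
  std::printf("vpkuhum mask:");
  for (unsigned i = 0; i != 16; ++i)
    std::printf(" %u", i * 2 + 1);                  // 1 3 5 ... 31
  std::printf("\n");

  // VPKUWUM: result bytes come in pairs; pair j takes bytes 4*j+2 and
  // 4*j+3 of vA||vB (the low-order halfword of word j).
  std::printf("vpkuwum mask:");
  for (unsigned j = 0; j != 8; ++j)
    std::printf(" %u %u", 4 * j + 2, 4 * j + 3);    // 2 3 6 7 ... 30 31
  std::printf("\n");
  return 0;
}
```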
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N) {
assert(N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
// Find the first non-undef value in the shuffle mask.
unsigned i;
for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
/*search*/;
if (i == 16) return -1; // all undef.
// Otherwise, check to see if the rest of the elements are consecutively
// numbered from this value.
unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
// Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
return -1;
return ShiftAmt;
}
/// isVSLDOIRotateShuffleMask - If this is a vsldoi rotate shuffle mask,
/// return the shift amount, otherwise return -1. Note that vsldoi(x,x) will
/// result in the shuffle being changed to shuffle(x,undef, ...) with
/// transformed byte numbers.
int PPC::isVSLDOIRotateShuffleMask(SDNode *N) {
assert(N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
// Find the first non-undef value in the shuffle mask.
unsigned i;
for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
/*search*/;
if (i == 16) return -1; // all undef.
// Otherwise, check to see if the rest of the elements are consecutively
// numbered from this value.
unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getValue();
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
// Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
return -1;
return ShiftAmt;
}
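The only difference between the two detectors is the & 15 in the rotate form: a vsldoi(x,y) mask keeps counting past 15 into the second operand, while the vsldoi(x,x) form shows up as a one-input shuffle whose indices wrap modulo 16. A self-contained sketch of the same detection logic on a plain mask array, with -1 standing in for undef (illustrative only; detectVSLDOI is not an LLVM API):

```cpp
#include <cstdio>

// Mirrors the detection logic above on a plain 16-entry mask, with -1 for an
// undef element.  Wrap = false models isVSLDOIShuffleMask, Wrap = true models
// isVSLDOIRotateShuffleMask.
int detectVSLDOI(const int Mask[16], bool Wrap) {
  unsigned i = 0;
  while (i != 16 && Mask[i] < 0)            // skip leading undefs
    ++i;
  if (i == 16) return -1;                   // all undef
  if (Mask[i] < (int)i) return -1;
  unsigned Shift = Mask[i] - i;
  for (++i; i != 16; ++i) {
    if (Mask[i] < 0) continue;              // undef matches anything
    unsigned Expected = Wrap ? (Shift + i) & 15 : Shift + i;
    if ((unsigned)Mask[i] != Expected) return -1;
  }
  return (int)Shift;
}

int main() {
  // vsldoi(x,y,5): bytes 5..20 of x||y.
  int TwoInput[16] = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
  // vsldoi(x,x,5): same operation with equal inputs, expressed as a one-input
  // shuffle whose indices wrap modulo 16.
  int Rotate[16] = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4};
  std::printf("%d %d\n", detectVSLDOI(TwoInput, false),   // 5
                         detectVSLDOI(Rotate, true));      // 5
  return 0;
}
```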
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
@@ -810,7 +837,9 @@ SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
return Op;
if (PPC::isVPKUWUMShuffleMask(PermMask.Val) ||
PPC::isVPKUHUMShuffleMask(PermMask.Val))
PPC::isVPKUHUMShuffleMask(PermMask.Val) ||
PPC::isVSLDOIShuffleMask(PermMask.Val) != -1 ||
PPC::isVSLDOIRotateShuffleMask(PermMask.Val) != -1)
return Op;
// TODO: Handle more cases, and also handle cases that are cheaper to do as


@@ -110,6 +110,14 @@ namespace llvm {
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(SDNode *N);
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N);
/// isVSLDOIRotateShuffleMask - If this is a vsldoi rotate shuffle mask,
/// return the shift amount, otherwise return -1. This matches vsldoi(x,x).
int isVSLDOIRotateShuffleMask(SDNode *N);
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.


@@ -24,6 +24,23 @@ def VPKUWUM_shuffle_mask : PatLeaf<(build_vector), [{
return PPC::isVPKUWUMShuffleMask(N);
}]>;
def VSLDOI_get_imm : SDNodeXForm<build_vector, [{
return getI32Imm(PPC::isVSLDOIShuffleMask(N));
}]>;
def VSLDOI_shuffle_mask : PatLeaf<(build_vector), [{
return PPC::isVSLDOIShuffleMask(N) != -1;
}], VSLDOI_get_imm>;
/// VSLDOI_rotate* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_rotate_get_imm : SDNodeXForm<build_vector, [{
return getI32Imm(PPC::isVSLDOIRotateShuffleMask(N));
}]>;
def VSLDOI_rotate_shuffle_mask : PatLeaf<(build_vector), [{
return PPC::isVSLDOIRotateShuffleMask(N) != -1;
}], VSLDOI_rotate_get_imm>;
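The comment above is the reason the rotate variants exist: when the front-end emits vsldoi(x,x) as a two-input shuffle with identical operands, the DAG combiner canonicalizes it to a one-input shuffle, folding indices that pointed into the second (now undef) operand back into the first. A sketch of that index rewrite for 16 byte elements (illustrative; this is not the actual DAG-combiner code):

```cpp
#include <cstdio>

int main() {
  // Two-input mask for vsldoi(x, x, 3): bytes 3..18 of x||x.
  int Mask[16];
  for (unsigned i = 0; i != 16; ++i)
    Mask[i] = 3 + i;

  // With both shuffle inputs equal, index j and index j-16 select the same
  // byte, so the shuffle can be rewritten as shuffle(x, undef, Mask & 15) --
  // the form VSLDOI_rotate_shuffle_mask matches.
  for (unsigned i = 0; i != 16; ++i)
    std::printf("%d ", Mask[i] & 15);     // prints 3 4 ... 15 0 1 2
  std::printf("\n");
  return 0;
}
```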
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
return getI32Imm(PPC::getVSPLTImmediate(N, 1));
@@ -182,11 +199,12 @@ def VMLADDUHM : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>;
def VPERM : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>;
def VSEL : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>;
// Shuffles.
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
"vsldoi $vD, $vA, $vB, $SH", VecFP,
[(set VRRC:$vD,
(int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
imm:$SH))]>;
(vector_shuffle (v16i8 VRRC:$vA), VRRC:$vB,
VSLDOI_shuffle_mask:$SH))]>;
// VX-Form instructions. AltiVec arithmetic ops.
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
@@ -504,6 +522,12 @@ def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
// Shuffles.
// Match vsldoi(x,x)
def:Pat<(vector_shuffle (v16i8 VRRC:$vA),undef, VSLDOI_rotate_shuffle_mask:$in),
(VSLDOI VRRC:$vA, VRRC:$vA, VSLDOI_rotate_shuffle_mask:$in)>;
// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;


@@ -130,11 +130,6 @@ Instcombine llvm.ppc.altivec.vperm with an immediate into a shuffle operation.
//===----------------------------------------------------------------------===//
Handle VECTOR_SHUFFLE nodes with the appropriate shuffle mask with vsldoi,
vpkuhum and vpkuwum.
//===----------------------------------------------------------------------===//
Implement multiply for vector integer types, to avoid the horrible scalarized
code produced by legalize.