
GlobalISel: Implement narrowScalar for shift main type

This is pretty much directly ported from SelectionDAG. It doesn't
include the shift-by-non-constant-but-known-bits version, since there
isn't a GlobalISel version of computeKnownBits yet.

This shows a disadvantage of targets not specifying which type should
be used for the shift amount. If type 0 is legalized before type 1, the
operations on the shift amount type use the wider type (which are then
also less likely to be legal). This can be avoided by targets specifying
legalization actions on type 1 earlier than for type 0.

llvm-svn: 353455
Matt Arsenault 2019-02-07 19:37:44 +00:00
parent 3f79f1900f
commit e739762e43
14 changed files with 3263 additions and 267 deletions
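To illustrate the ordering point in the message above, here is a condensed sketch of shift rules for a hypothetical target, modeled on the AArch64 rule change in this commit (the opcode grouping and bounds are chosen for illustration only): the shift amount type is clamped before the shifted type, so the general expansion below never has to build its compares and subtracts in an over-wide amount type.

    getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
        .legalFor({{s32, s32}, {s64, s64}})
        // Legalize the shift amount (type index 1) first ...
        .clampScalar(1, s32, s64)
        // ... then the shifted value (type index 0).
        .clampScalar(0, s32, s64)
        .widenScalarToNextPow2(0);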


@@ -175,6 +175,10 @@ private:
LegalizeResult
reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
LLT HalfTy, LLT ShiftAmtTy);
LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);


@@ -203,6 +203,7 @@ protected:
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend);
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty,
const LLT &Op1Ty);
@@ -1163,6 +1164,18 @@ public:
return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
Optional<unsigned> Flags = None) {
return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
}
MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1,
Optional<unsigned> Flags = None) {
return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_AND \p Op0, \p Op1
///
/// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p

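The buildLShr and buildAShr wrappers added above are used the same way as the existing buildShl. A minimal hedged usage sketch (B, HalfTy, AmtTy, InH and Amt are illustrative names, not part of this change):

    // Build the high half of a logical right shift and a sign-fill value.
    auto ShAmt = B.buildConstant(AmtTy, Amt);                      // G_CONSTANT
    auto HiPart = B.buildLShr(HalfTy, InH, ShAmt);                 // G_LSHR
    auto SignFill = B.buildAShr(                                   // G_ASHR
        HalfTy, InH, B.buildConstant(AmtTy, HalfTy.getSizeInBits() - 1));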

@@ -811,14 +811,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
}
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_ASHR: {
if (TypeIdx != 1)
return UnableToLegalize; // TODO
Observer.changingInstr(MI);
narrowScalarSrc(MI, NarrowTy, 2);
Observer.changedInstr(MI);
return Legalized;
}
case TargetOpcode::G_ASHR:
return narrowScalarShift(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_CTLZ:
case TargetOpcode::G_CTLZ_ZERO_UNDEF:
case TargetOpcode::G_CTTZ:
@@ -2194,6 +2188,221 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
}
}
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
const LLT HalfTy, const LLT AmtTy) {
unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
if (Amt.isNullValue()) {
MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
MI.eraseFromParent();
return Legalized;
}
LLT NVT = HalfTy;
unsigned NVTBits = HalfTy.getSizeInBits();
unsigned VTBits = 2 * NVTBits;
SrcOp Lo(0), Hi(0);
if (MI.getOpcode() == TargetOpcode::G_SHL) {
if (Amt.ugt(VTBits)) {
Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
} else if (Amt.ugt(NVTBits)) {
Lo = MIRBuilder.buildConstant(NVT, 0);
Hi = MIRBuilder.buildShl(NVT, InL,
MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
} else if (Amt == NVTBits) {
Lo = MIRBuilder.buildConstant(NVT, 0);
Hi = InL;
} else {
Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt));
Hi = MIRBuilder.buildOr(
NVT,
MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt)),
MIRBuilder.buildLShr(
NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)));
}
} else if (MI.getOpcode() == TargetOpcode::G_LSHR) {
if (Amt.ugt(VTBits)) {
Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
} else if (Amt.ugt(NVTBits)) {
Lo = MIRBuilder.buildLShr(NVT, InH,
MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
Hi = MIRBuilder.buildConstant(NVT, 0);
} else if (Amt == NVTBits) {
Lo = InH;
Hi = MIRBuilder.buildConstant(NVT, 0);
} else {
auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
auto OrRHS = MIRBuilder.buildShl(
NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst);
}
} else {
if (Amt.ugt(VTBits)) {
Hi = Lo = MIRBuilder.buildAShr(
NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
} else if (Amt.ugt(NVTBits)) {
Lo = MIRBuilder.buildAShr(NVT, InH,
MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
Hi = MIRBuilder.buildAShr(NVT, InH,
MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
} else if (Amt == NVTBits) {
Lo = InH;
Hi = MIRBuilder.buildAShr(NVT, InH,
MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
} else {
auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
auto OrRHS = MIRBuilder.buildShl(
NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst);
}
}
MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
MI.eraseFromParent();
return Legalized;
}
// TODO: Optimize if constant shift amount.
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
LLT RequestedTy) {
if (TypeIdx == 1) {
Observer.changingInstr(MI);
narrowScalarSrc(MI, RequestedTy, 2);
Observer.changedInstr(MI);
return Legalized;
}
unsigned DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy.isVector())
return UnableToLegalize;
unsigned Amt = MI.getOperand(2).getReg();
LLT ShiftAmtTy = MRI.getType(Amt);
const unsigned DstEltSize = DstTy.getScalarSizeInBits();
if (DstEltSize % 2 != 0)
return UnableToLegalize;
// Ignore the input type. We can only go to exactly half the size of the
// input. If that isn't small enough, the resulting pieces will be further
// legalized.
const unsigned NewBitSize = DstEltSize / 2;
const LLT HalfTy = LLT::scalar(NewBitSize);
const LLT CondTy = LLT::scalar(1);
if (const MachineInstr *KShiftAmt =
getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) {
return narrowScalarShiftByConstant(
MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy);
}
// TODO: Expand with known bits.
// Handle the fully general expansion by an unknown amount.
auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
unsigned ResultRegs[2];
switch (MI.getOpcode()) {
case TargetOpcode::G_SHL: {
// Short: ShAmt < NewBitSize
auto LoS = MIRBuilder.buildShl(HalfTy, InH, Amt);
auto OrLHS = MIRBuilder.buildShl(HalfTy, InH, Amt);
auto OrRHS = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
auto HiS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
// Long: ShAmt >= NewBitSize
auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero.
auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
auto Hi = MIRBuilder.buildSelect(
HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
ResultRegs[0] = Lo.getReg(0);
ResultRegs[1] = Hi.getReg(0);
break;
}
case TargetOpcode::G_LSHR: {
// Short: ShAmt < NewBitSize
auto HiS = MIRBuilder.buildLShr(HalfTy, InH, Amt);
auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
// Long: ShAmt >= NewBitSize
auto HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero.
auto LoL = MIRBuilder.buildLShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
auto Lo = MIRBuilder.buildSelect(
HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
ResultRegs[0] = Lo.getReg(0);
ResultRegs[1] = Hi.getReg(0);
break;
}
case TargetOpcode::G_ASHR: {
// Short: ShAmt < NewBitSize
auto HiS = MIRBuilder.buildAShr(HalfTy, InH, Amt);
auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
auto OrRHS = MIRBuilder.buildLShr(HalfTy, InH, AmtLack);
auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
// Long: ShAmt >= NewBitSize
// Sign of Hi part.
auto HiL = MIRBuilder.buildAShr(
HalfTy, InH, MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1));
auto LoL = MIRBuilder.buildAShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
auto Lo = MIRBuilder.buildSelect(
HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
ResultRegs[0] = Lo.getReg(0);
ResultRegs[1] = Hi.getReg(0);
break;
}
default:
llvm_unreachable("not a shift");
}
MIRBuilder.buildMerge(DstReg, ResultRegs);
MI.eraseFromParent();
return Legalized;
}
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT NewTy) {
unsigned DstReg = MI.getOperand(0).getReg();

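As a plain-arithmetic cross-check of the unknown-amount expansion in narrowScalarShift above, the G_SHL case computes the same result as this self-contained C++ sketch of a 128-bit left shift over two 64-bit halves (an illustration under the same Short/Long/IsZero case split, not code from this commit; shl128 and its names are hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Shift the 128-bit value {Hi=InH, Lo=InL} left by Amt using only 64-bit
    // operations, mirroring the Short/Long/IsZero selects of the G_SHL case.
    static void shl128(uint64_t InL, uint64_t InH, unsigned Amt,
                       uint64_t &Lo, uint64_t &Hi) {
      const unsigned NewBitSize = 64;
      assert(Amt < 2 * NewBitSize && "shift amount out of range");
      if (Amt == 0) {                    // IsZero: pass the input through.
        Lo = InL;
        Hi = InH;
      } else if (Amt < NewBitSize) {     // Short: ShAmt < NewBitSize.
        Lo = InL << Amt;
        Hi = (InH << Amt) | (InL >> (NewBitSize - Amt)); // bits carried from InL
      } else {                           // Long: ShAmt >= NewBitSize.
        Lo = 0;                          // Lo part is zero.
        Hi = InL << (Amt - NewBitSize);  // Hi comes entirely from InL.
      }
    }

    int main() {
      uint64_t Lo, Hi;
      shl128(/*InL=*/1, /*InH=*/0, /*Amt=*/70, Lo, Hi);
      std::printf("Lo=0x%llx Hi=0x%llx\n", (unsigned long long)Lo,
                  (unsigned long long)Hi); // prints Lo=0x0 Hi=0x40
    }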

@@ -185,6 +185,12 @@ void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
assert((Res == Op0 && Res == Op1) && "type mismatch");
}
void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
const LLT &Op1) {
assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
assert((Res == Op0) && "type mismatch");
}
MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
unsigned Op1) {
assert(getMRI()->getType(Res).isPointer() &&
@@ -852,11 +858,8 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
}
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR:
case TargetOpcode::G_MUL:
case TargetOpcode::G_OR:
case TargetOpcode::G_SHL:
case TargetOpcode::G_SUB:
case TargetOpcode::G_XOR:
case TargetOpcode::G_UDIV:
@@ -870,6 +873,17 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI()),
SrcOps[1].getLLTTy(*getMRI()));
break;
}
case TargetOpcode::G_SHL:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR: {
assert(DstOps.size() == 1 && "Invalid Dst");
assert(SrcOps.size() == 2 && "Invalid Srcs");
validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
SrcOps[0].getLLTTy(*getMRI()),
SrcOps[1].getLLTTy(*getMRI()));
break;
}
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_ANYEXT:
@@ -879,7 +893,7 @@ MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
SrcOps[0].getLLTTy(*getMRI()), true);
break;
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_FPTRUNC:
case TargetOpcode::G_FPTRUNC: {
assert(DstOps.size() == 1 && "Invalid Dst");
assert(SrcOps.size() == 1 && "Invalid Srcs");
validateTruncExt(DstOps[0].getLLTTy(*getMRI()),


@@ -85,6 +85,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
getActionDefinitionsBuilder(G_SHL)
.legalFor({{s32, s32}, {s64, s64},
{v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
.clampScalar(1, s32, s64)
.clampScalar(0, s32, s64)
.widenScalarToNextPow2(0)
.clampNumElements(0, v2s32, v4s32)
@@ -105,6 +106,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
getActionDefinitionsBuilder({G_LSHR, G_ASHR})
.legalFor({{s32, s32}, {s64, s64}})
.clampScalar(1, s32, s64)
.clampScalar(0, s32, s64)
.minScalarSameAs(1, 0);


@@ -439,11 +439,17 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
.clampMaxNumElements(0, S16, 2);
} else
Shifts.legalFor({{S16, S32}, {S16, S16}});
Shifts.clampScalar(1, S16, S32);
Shifts.clampScalar(0, S16, S64);
} else
} else {
// Make sure we legalize the shift amount type first, as the general
// expansion for the shifted type will produce much worse code if it hasn't
// been truncated already.
Shifts.clampScalar(1, S32, S32);
Shifts.clampScalar(0, S32, S64);
Shifts.clampScalar(1, S32, S32)
.scalarize(0);
}
Shifts.scalarize(0);
for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;


@@ -8,13 +8,13 @@ body: |
; CHECK-LABEL: name: test_merge_s4
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C4]]


@@ -7,30 +7,30 @@ body: |
; CHECK-LABEL: name: test_shift
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C1]](s32)
; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; CHECK: $w0 = COPY [[COPY2]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]]
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND2]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[AND1]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: $w0 = COPY [[COPY3]](s32)
; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C4]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[AND3]](s32)
; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC4]], [[C4]]
; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC5]], [[AND3]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
; CHECK: $w0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $x0
@@ -96,3 +96,109 @@ body: |
$x0 = COPY %2(s64)
...
---
name: test_shl_s128_s128
body: |
bb.0:
; CHECK-LABEL: name: test_shl_s128_s128
; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s64)
; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s64)
; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s64)
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s64)
; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[SHL]], [[C2]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[SHL2]]
; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV1]], [[SELECT1]]
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; CHECK: $q0 = COPY [[MV]](s128)
%0:_(s128) = COPY $q0
%1:_(s128) = COPY $q1
%2:_(s128) = G_SHL %0, %1
$q0 = COPY %2
...
---
name: test_lshr_s128_s128
body: |
bb.0:
; CHECK-LABEL: name: test_lshr_s128_s128
; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s64)
; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s64)
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s64)
; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[LSHR2]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[LSHR]], [[C2]]
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
; CHECK: $q0 = COPY [[MV]](s128)
%0:_(s128) = COPY $q0
%1:_(s128) = COPY $q1
%2:_(s128) = G_LSHR %0, %1
$q0 = COPY %2
...
---
name: test_ashr_s128_s128
body: |
bb.0:
; CHECK-LABEL: name: test_ashr_s128_s128
; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s64)
; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s64)
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
; CHECK: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s64)
; CHECK: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s64)
; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[ASHR2]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[ASHR]], [[ASHR1]]
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
; CHECK: $q0 = COPY [[MV]](s128)
%0:_(s128) = COPY $q0
%1:_(s128) = COPY $q1
%2:_(s128) = G_ASHR %0, %1
$q0 = COPY %2
...


@@ -13,8 +13,8 @@ body: |
; CHECK: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)
; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT]], [[ZEXT1]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -9,13 +9,13 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C5]]
@@ -39,26 +39,26 @@ body: |
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C5]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C6]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[COPY3]]
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C7]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C8]]
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C7]](s32)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C9]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[AND4]](s32)
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND4]], [[AND3]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[COPY6]], [[COPY7]]

File diff suppressed because it is too large


@@ -234,58 +234,180 @@ body: |
liveins: $vgpr0
; CHECK-LABEL: name: test_unmerge_s1_s8
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 15
; CHECK: [[SHL:%[0-9]+]]:_(s128) = G_SHL [[ZEXT]], [[C]](s128)
; CHECK: [[OR:%[0-9]+]]:_(s128) = G_OR [[ZEXT]], [[SHL]]
; CHECK: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 30
; CHECK: [[SHL1:%[0-9]+]]:_(s128) = G_SHL [[OR]], [[C1]](s128)
; CHECK: [[OR1:%[0-9]+]]:_(s128) = G_OR [[OR]], [[SHL1]]
; CHECK: [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 45
; CHECK: [[SHL2:%[0-9]+]]:_(s128) = G_SHL [[OR1]], [[C2]](s128)
; CHECK: [[OR2:%[0-9]+]]:_(s128) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 60
; CHECK: [[SHL3:%[0-9]+]]:_(s128) = G_SHL [[OR2]], [[C3]](s128)
; CHECK: [[OR3:%[0-9]+]]:_(s128) = G_OR [[OR2]], [[SHL3]]
; CHECK: [[C4:%[0-9]+]]:_(s128) = G_CONSTANT i128 75
; CHECK: [[SHL4:%[0-9]+]]:_(s128) = G_SHL [[OR3]], [[C4]](s128)
; CHECK: [[OR4:%[0-9]+]]:_(s128) = G_OR [[OR3]], [[SHL4]]
; CHECK: [[C5:%[0-9]+]]:_(s128) = G_CONSTANT i128 90
; CHECK: [[SHL5:%[0-9]+]]:_(s128) = G_SHL [[OR4]], [[C5]](s128)
; CHECK: [[OR5:%[0-9]+]]:_(s128) = G_OR [[OR4]], [[SHL5]]
; CHECK: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 105
; CHECK: [[SHL6:%[0-9]+]]:_(s128) = G_SHL [[OR5]], [[C6]](s128)
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[OR5]](s128)
; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[SHL6]](s128)
; CHECK: [[OR6:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV2]]
; CHECK: [[OR7:%[0-9]+]]:_(s64) = G_OR [[UV1]], [[UV3]]
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR6]](s64), [[OR7]](s64)
; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[MV]](s128)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[UV4]](s16)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s1)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[UV5]](s16)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s1)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s1) = G_TRUNC [[UV6]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s1)
; CHECK: [[TRUNC4:%[0-9]+]]:_(s1) = G_TRUNC [[UV7]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC4]](s1)
; CHECK: [[TRUNC5:%[0-9]+]]:_(s1) = G_TRUNC [[UV8]](s16)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC5]](s1)
; CHECK: [[TRUNC6:%[0-9]+]]:_(s1) = G_TRUNC [[UV9]](s16)
; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC6]](s1)
; CHECK: [[TRUNC7:%[0-9]+]]:_(s1) = G_TRUNC [[UV10]](s16)
; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC7]](s1)
; CHECK: [[TRUNC8:%[0-9]+]]:_(s1) = G_TRUNC [[UV11]](s16)
; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s1)
; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
; CHECK: $vgpr5 = COPY [[ANYEXT5]](s32)
; CHECK: $vgpr6 = COPY [[ANYEXT6]](s32)
; CHECK: $vgpr7 = COPY [[ANYEXT7]](s32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]](s32)
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[ANYEXT]](s128)
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C]]
; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C1]]
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[AND]](s64), [[AND1]](s64)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C2]](s64), [[C3]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV1]](s128)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C4]]
; CHECK: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C4]], [[TRUNC]]
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C4]]
; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C5]]
; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB1]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB]](s32)
; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C6]]
; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV3]], [[SELECT1]]
; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
; CHECK: [[OR1:%[0-9]+]]:_(s64) = G_OR [[UV4]], [[SELECT]]
; CHECK: [[OR2:%[0-9]+]]:_(s64) = G_OR [[UV5]], [[SELECT2]]
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 30
; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C7]](s64), [[C8]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV2]](s128)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC1]], [[C9]]
; CHECK: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C9]], [[TRUNC1]]
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC1]](s32), [[C9]]
; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC1]](s32), [[C10]]
; CHECK: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[TRUNC1]](s32)
; CHECK: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[TRUNC1]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[OR1]], [[SUB3]](s32)
; CHECK: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[SUB2]](s32)
; CHECK: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C11]]
; CHECK: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR3]], [[SHL5]]
; CHECK: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[OR2]], [[SELECT4]]
; CHECK: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[SELECT3]]
; CHECK: [[OR5:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[SELECT5]]
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 45
; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C12]](s64), [[C13]](s64)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[MV3]](s128)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC2]], [[C14]]
; CHECK: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[TRUNC2]]
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC2]](s32), [[C14]]
; CHECK: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC2]](s32), [[C15]]
; CHECK: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[OR5]], [[TRUNC2]](s32)
; CHECK: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[OR5]], [[TRUNC2]](s32)
; CHECK: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[OR4]], [[SUB5]](s32)
; CHECK: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL7]], [[LSHR2]]
; CHECK: [[C16:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[OR4]], [[SUB4]](s32)
; CHECK: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL6]], [[C16]]
; CHECK: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR6]], [[SHL8]]
; CHECK: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[OR5]], [[SELECT7]]
; CHECK: [[OR7:%[0-9]+]]:_(s64) = G_OR [[OR4]], [[SELECT6]]
; CHECK: [[OR8:%[0-9]+]]:_(s64) = G_OR [[OR5]], [[SELECT8]]
; CHECK: [[C17:%[0-9]+]]:_(s64) = G_CONSTANT i64 60
; CHECK: [[C18:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV4:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C17]](s64), [[C18]](s64)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[MV4]](s128)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[C19]]
; CHECK: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C19]], [[TRUNC3]]
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC3]](s32), [[C19]]
; CHECK: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC3]](s32), [[C20]]
; CHECK: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[OR8]], [[TRUNC3]](s32)
; CHECK: [[SHL10:%[0-9]+]]:_(s64) = G_SHL [[OR8]], [[TRUNC3]](s32)
; CHECK: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[OR7]], [[SUB7]](s32)
; CHECK: [[OR9:%[0-9]+]]:_(s64) = G_OR [[SHL10]], [[LSHR3]]
; CHECK: [[C21:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL11:%[0-9]+]]:_(s64) = G_SHL [[OR7]], [[SUB6]](s32)
; CHECK: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL9]], [[C21]]
; CHECK: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR9]], [[SHL11]]
; CHECK: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[OR8]], [[SELECT10]]
; CHECK: [[OR10:%[0-9]+]]:_(s64) = G_OR [[OR7]], [[SELECT9]]
; CHECK: [[OR11:%[0-9]+]]:_(s64) = G_OR [[OR8]], [[SELECT11]]
; CHECK: [[C22:%[0-9]+]]:_(s64) = G_CONSTANT i64 75
; CHECK: [[C23:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV5:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C22]](s64), [[C23]](s64)
; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[MV5]](s128)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[TRUNC4]], [[C24]]
; CHECK: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C24]], [[TRUNC4]]
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC4]](s32), [[C24]]
; CHECK: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC4]](s32), [[C25]]
; CHECK: [[SHL12:%[0-9]+]]:_(s64) = G_SHL [[OR11]], [[TRUNC4]](s32)
; CHECK: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[OR11]], [[TRUNC4]](s32)
; CHECK: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[OR10]], [[SUB9]](s32)
; CHECK: [[OR12:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[LSHR4]]
; CHECK: [[C26:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL14:%[0-9]+]]:_(s64) = G_SHL [[OR10]], [[SUB8]](s32)
; CHECK: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[SHL12]], [[C26]]
; CHECK: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR12]], [[SHL14]]
; CHECK: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[OR11]], [[SELECT13]]
; CHECK: [[OR13:%[0-9]+]]:_(s64) = G_OR [[OR10]], [[SELECT12]]
; CHECK: [[OR14:%[0-9]+]]:_(s64) = G_OR [[OR11]], [[SELECT14]]
; CHECK: [[C27:%[0-9]+]]:_(s64) = G_CONSTANT i64 90
; CHECK: [[C28:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV6:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C27]](s64), [[C28]](s64)
; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[MV6]](s128)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB10:%[0-9]+]]:_(s32) = G_SUB [[TRUNC5]], [[C29]]
; CHECK: [[SUB11:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[TRUNC5]]
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP10:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC5]](s32), [[C29]]
; CHECK: [[ICMP11:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC5]](s32), [[C30]]
; CHECK: [[SHL15:%[0-9]+]]:_(s64) = G_SHL [[OR14]], [[TRUNC5]](s32)
; CHECK: [[SHL16:%[0-9]+]]:_(s64) = G_SHL [[OR14]], [[TRUNC5]](s32)
; CHECK: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[OR13]], [[SUB11]](s32)
; CHECK: [[OR15:%[0-9]+]]:_(s64) = G_OR [[SHL16]], [[LSHR5]]
; CHECK: [[C31:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL17:%[0-9]+]]:_(s64) = G_SHL [[OR13]], [[SUB10]](s32)
; CHECK: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP10]](s1), [[SHL15]], [[C31]]
; CHECK: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP10]](s1), [[OR15]], [[SHL17]]
; CHECK: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP11]](s1), [[OR14]], [[SELECT16]]
; CHECK: [[OR16:%[0-9]+]]:_(s64) = G_OR [[OR13]], [[SELECT15]]
; CHECK: [[OR17:%[0-9]+]]:_(s64) = G_OR [[OR14]], [[SELECT17]]
; CHECK: [[C32:%[0-9]+]]:_(s64) = G_CONSTANT i64 105
; CHECK: [[C33:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV7:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C32]](s64), [[C33]](s64)
; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[MV7]](s128)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[SUB12:%[0-9]+]]:_(s32) = G_SUB [[TRUNC6]], [[C34]]
; CHECK: [[SUB13:%[0-9]+]]:_(s32) = G_SUB [[C34]], [[TRUNC6]]
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ICMP12:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC6]](s32), [[C34]]
; CHECK: [[ICMP13:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC6]](s32), [[C35]]
; CHECK: [[SHL18:%[0-9]+]]:_(s64) = G_SHL [[OR17]], [[TRUNC6]](s32)
; CHECK: [[SHL19:%[0-9]+]]:_(s64) = G_SHL [[OR17]], [[TRUNC6]](s32)
; CHECK: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[OR16]], [[SUB13]](s32)
; CHECK: [[OR18:%[0-9]+]]:_(s64) = G_OR [[SHL19]], [[LSHR6]]
; CHECK: [[C36:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[OR16]], [[SUB12]](s32)
; CHECK: [[SELECT18:%[0-9]+]]:_(s64) = G_SELECT [[ICMP12]](s1), [[SHL18]], [[C36]]
; CHECK: [[SELECT19:%[0-9]+]]:_(s64) = G_SELECT [[ICMP12]](s1), [[OR18]], [[SHL20]]
; CHECK: [[SELECT20:%[0-9]+]]:_(s64) = G_SELECT [[ICMP13]](s1), [[OR17]], [[SELECT19]]
; CHECK: [[OR19:%[0-9]+]]:_(s64) = G_OR [[OR16]], [[SELECT18]]
; CHECK: [[OR20:%[0-9]+]]:_(s64) = G_OR [[OR17]], [[SELECT20]]
; CHECK: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR19]](s64)
; CHECK: [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR20]](s64)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s16)
; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s16)
; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s16)
; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s16)
; CHECK: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s16)
; CHECK: $vgpr0 = COPY [[ANYEXT1]](s32)
; CHECK: $vgpr1 = COPY [[ANYEXT2]](s32)
; CHECK: $vgpr2 = COPY [[ANYEXT3]](s32)
; CHECK: $vgpr3 = COPY [[ANYEXT4]](s32)
; CHECK: $vgpr4 = COPY [[ANYEXT5]](s32)
; CHECK: $vgpr5 = COPY [[ANYEXT6]](s32)
; CHECK: $vgpr6 = COPY [[ANYEXT7]](s32)
; CHECK: $vgpr7 = COPY [[ANYEXT8]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s8) = G_TRUNC %0
%2:_(s1), %3:_(s1), %4:_(s1), %5:_(s1), %6:_(s1), %7:_(s1), %8:_(s1), %9:_(s1) = G_UNMERGE_VALUES %1