
Revert r331819 [GlobalISel][Legalizer] More concise and faster widenScalar, NFC

Reverting this to see if the clang-cmake-aarch64-global-isel and
clang-cmake-aarch64-quick bots are failing because of this commit

llvm-svn: 331839
Roman Tereshin 2018-05-09 01:43:12 +00:00
parent cc9d8f83c8
commit 53a5f4ff4b
6 changed files with 249 additions and 175 deletions


@@ -93,18 +93,6 @@ public:
const LegalizerInfo &getLegalizerInfo() const { return LI; }
private:
/// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
/// Use by extending the operand's type to \p WideTy using the specified \p
/// ExtOpcode for the extension instruction, and replacing the vreg of the
/// operand in place.
void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
unsigned ExtOpcode);
/// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
/// Def by extending the operand's type to \p WideTy and truncating it back
/// with the \p TruncOpcode, and replacing the vreg of the operand in place.
void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
unsigned TruncOpcode = TargetOpcode::G_TRUNC);
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic

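For orientation while reading the hunks below: the two private helpers declared above package the extend-operate-truncate idiom that this revert goes back to spelling out by hand. A minimal sketch of that idiom for a binary operation such as G_ADD, using only builder calls that appear elsewhere in this diff; the free-standing function and its name are illustrative, not part of the commit.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

// Widen a narrow binary op (e.g. an s8 G_ADD) to WideTy by hand:
// any-extend both sources, redo the op at the wide type, truncate back.
static void widenBinOpByHand(MachineInstr &MI, LLT WideTy,
                             MachineIRBuilder &MIRBuilder,
                             MachineRegisterInfo &MRI) {
  // Any extension is fine: the high bits cannot influence the low bits of
  // an add/sub/mul/and/or/xor/shl result.
  unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
  unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
  MIRBuilder.buildAnyExt(LHSExt, MI.getOperand(1).getReg());
  MIRBuilder.buildAnyExt(RHSExt, MI.getOperand(2).getReg());

  // Redo the operation at the wider type ...
  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
  MIRBuilder.buildInstr(MI.getOpcode())
      .addDef(DstExt)
      .addUse(LHSExt)
      .addUse(RHSExt);

  // ... and truncate back into the original narrow destination.
  MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
  MI.eraseFromParent();
}

widenScalarSrc and widenScalarDst achieve the same effect by mutating MI's operands in place instead of erasing and rebuilding it, which is what made the reverted version shorter.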

@@ -154,7 +154,6 @@ public:
/// \name Control where instructions we create are recorded (typically for
/// visiting again later during legalization).
/// @{
void recordInsertion(MachineInstr *InsertedInstr) const;
void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);
void stopRecordingInsertions();
/// @}
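
These hooks are how the legalizer learns about instructions the builder creates on its behalf, so they can be revisited later, as the comment above says. A minimal usage sketch, assuming a MachineIRBuilder that is already positioned inside a function; the helper name and the work-list bookkeeping are illustrative, not from this commit.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Record every instruction the builder inserts while one legalization step
// runs, so each new instruction can be re-checked afterwards.
static void runStepAndCollect(MachineIRBuilder &MIRBuilder,
                              SmallVectorImpl<MachineInstr *> &WorkList) {
  MIRBuilder.recordInsertions(
      [&WorkList](MachineInstr *MI) { WorkList.push_back(MI); });

  // ... perform a legalization step that builds new instructions here ...

  MIRBuilder.stopRecordingInsertions();
}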


@@ -636,11 +636,6 @@ public:
Contents.ImmVal = immVal;
}
void setCImm(const ConstantInt *CI) {
assert(isCImm() && "Wrong MachineOperand mutator");
Contents.CI = CI;
}
void setFPImm(const ConstantFP *CFP) {
assert(isFPImm() && "Wrong MachineOperand mutator");
Contents.CFP = CFP;

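Mutators like these let a pass rewrite a constant operand in place rather than rebuilding the whole instruction; that is how the reverted G_CONSTANT and G_FCONSTANT widening further down in this diff worked. A small sketch of that use; the helper name is illustrative, not from this commit.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/Constants.h"
using namespace llvm;

// Widen the immediate of a G_CONSTANT in place: sign-extend its APInt to the
// wider bit width and swap in the new ConstantInt.
static void widenCImmInPlace(MachineInstr &MI, unsigned WideBits,
                             LLVMContext &Ctx) {
  MachineOperand &SrcMO = MI.getOperand(1);
  const APInt &Val = SrcMO.getCImm()->getValue().sext(WideBits);
  SrcMO.setCImm(ConstantInt::get(Ctx, Val));
}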

@@ -591,22 +591,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
}
}
void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy,
unsigned OpIdx, unsigned ExtOpcode) {
MachineOperand &MO = MI.getOperand(OpIdx);
auto ExtB = MIRBuilder.buildInstr(ExtOpcode, WideTy, MO.getReg());
MO.setReg(ExtB->getOperand(0).getReg());
}
void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
unsigned OpIdx, unsigned TruncOpcode) {
MachineOperand &MO = MI.getOperand(OpIdx);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
MIRBuilder.buildInstr(TruncOpcode, MO.getReg(), DstExt);
MO.setReg(DstExt);
}
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
MIRBuilder.setInstr(MI);
@@ -614,83 +598,140 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
case TargetOpcode::G_MUL:
case TargetOpcode::G_OR:
case TargetOpcode::G_XOR:
case TargetOpcode::G_SUB:
case TargetOpcode::G_SHL:
case TargetOpcode::G_SHL: {
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(1).getReg());
MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(2).getReg());
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(DstExt)
.addUse(Src1Ext)
.addUse(Src2Ext);
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_SDIV:
case TargetOpcode::G_SREM:
case TargetOpcode::G_ASHR:
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
case TargetOpcode::G_UDIV:
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM:
case TargetOpcode::G_LSHR:
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR: {
unsigned ExtOp = MI.getOpcode() == TargetOpcode::G_SDIV ||
MI.getOpcode() == TargetOpcode::G_SREM ||
MI.getOpcode() == TargetOpcode::G_ASHR
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;
case TargetOpcode::G_SELECT:
unsigned LHSExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(ExtOp).addDef(LHSExt).addUse(
MI.getOperand(1).getReg());
unsigned RHSExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(ExtOp).addDef(RHSExt).addUse(
MI.getOperand(2).getReg());
unsigned ResExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(ResExt)
.addUse(LHSExt)
.addUse(RHSExt);
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ResExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_SELECT: {
if (TypeIdx != 0)
return UnableToLegalize;
// Perform operation at larger width (any extension is fine here, high bits
// don't affect the result) and then truncate the result back to the
// original type.
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
unsigned Src1Ext = MRI.createGenericVirtualRegister(WideTy);
unsigned Src2Ext = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(Src1Ext, MI.getOperand(2).getReg());
MIRBuilder.buildAnyExt(Src2Ext, MI.getOperand(3).getReg());
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(TargetOpcode::G_SELECT)
.addDef(DstExt)
.addReg(MI.getOperand(1).getReg())
.addUse(Src1Ext)
.addUse(Src2Ext);
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FPTOSI:
case TargetOpcode::G_FPTOUI:
case TargetOpcode::G_FPTOUI: {
if (TypeIdx != 0)
return UnableToLegalize;
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(DstExt)
.addUse(MI.getOperand(1).getReg());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_UITOFP: {
if (TypeIdx != 1)
return UnableToLegalize;
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
MIRBuilder.recordInsertion(&MI);
return Legalized;
case TargetOpcode::G_UITOFP:
if (TypeIdx != 1)
return UnableToLegalize;
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
MIRBuilder.recordInsertion(&MI);
return Legalized;
unsigned Src = MI.getOperand(1).getReg();
unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
case TargetOpcode::G_INSERT:
if (MI.getOpcode() == TargetOpcode::G_SITOFP) {
MIRBuilder.buildSExt(SrcExt, Src);
} else {
assert(MI.getOpcode() == TargetOpcode::G_UITOFP && "Unexpected conv op");
MIRBuilder.buildZExt(SrcExt, Src);
}
MIRBuilder.buildInstr(MI.getOpcode())
.addDef(MI.getOperand(0).getReg())
.addUse(SrcExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_INSERT: {
if (TypeIdx != 0)
return UnableToLegalize;
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
return Legalized;
unsigned Src = MI.getOperand(1).getReg();
unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(SrcExt, Src);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
auto MIB = MIRBuilder.buildInsert(DstExt, SrcExt, MI.getOperand(2).getReg(),
MI.getOperand(3).getImm());
for (unsigned OpNum = 4; OpNum < MI.getNumOperands(); OpNum += 2) {
MIB.addReg(MI.getOperand(OpNum).getReg());
MIB.addImm(MI.getOperand(OpNum + 1).getImm());
}
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_LOAD:
// For some types like i24, we might try to widen to i32. To properly handle
// this we should be using a dedicated extending load, until then avoid
@@ -700,109 +741,164 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
return UnableToLegalize;
LLVM_FALLTHROUGH;
case TargetOpcode::G_SEXTLOAD:
case TargetOpcode::G_ZEXTLOAD:
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
case TargetOpcode::G_ZEXTLOAD: {
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildLoadInstr(MI.getOpcode(), DstExt, MI.getOperand(1).getReg(),
**MI.memoperands_begin());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_STORE: {
if (MRI.getType(MI.getOperand(0).getReg()) != LLT::scalar(1) ||
WideTy != LLT::scalar(8))
return UnableToLegalize;
const auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
switch (TLI.getBooleanContents(false, false)) {
case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_SEXT);
break;
case TargetLoweringBase::ZeroOrOneBooleanContent:
widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ZEXT);
break;
default:
widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
}
MIRBuilder.recordInsertion(&MI);
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
auto Content = TLI.getBooleanContents(false, false);
unsigned ExtOp = TargetOpcode::G_ANYEXT;
if (Content == TargetLoweringBase::ZeroOrOneBooleanContent)
ExtOp = TargetOpcode::G_ZEXT;
else if (Content == TargetLoweringBase::ZeroOrNegativeOneBooleanContent)
ExtOp = TargetOpcode::G_SEXT;
else
ExtOp = TargetOpcode::G_ANYEXT;
unsigned SrcExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildInstr(ExtOp).addDef(SrcExt).addUse(
MI.getOperand(0).getReg());
MIRBuilder.buildStore(SrcExt, MI.getOperand(1).getReg(),
**MI.memoperands_begin());
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_CONSTANT: {
MachineOperand &SrcMO = MI.getOperand(1);
LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits());
SrcMO.setCImm(ConstantInt::get(Ctx, Val));
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildConstant(DstExt, *MI.getOperand(1).getCImm());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_FCONSTANT: {
MachineOperand &SrcMO = MI.getOperand(1);
const ConstantFP *CFP = MI.getOperand(1).getFPImm();
APFloat Val = CFP->getValueAPF();
LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
APFloat Val = SrcMO.getFPImm()->getValueAPF();
auto LLT2Sem = [](LLT Ty) {
switch (Ty.getSizeInBits()) {
case 32:
return &APFloat::IEEEsingle();
break;
case 64:
return &APFloat::IEEEdouble();
break;
default:
llvm_unreachable("Unhandled fp widen type");
}
};
bool LosesInfo;
switch (WideTy.getSizeInBits()) {
case 32:
Val.convert(APFloat::IEEEsingle(), APFloat::rmTowardZero, &LosesInfo);
break;
case 64:
Val.convert(APFloat::IEEEdouble(), APFloat::rmTowardZero, &LosesInfo);
break;
default:
llvm_unreachable("Unhandled fp widen type");
}
SrcMO.setFPImm(ConstantFP::get(Ctx, Val));
widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC);
MIRBuilder.recordInsertion(&MI);
Val.convert(*LLT2Sem(WideTy), APFloat::rmTowardZero, &LosesInfo);
auto Cst = MIRBuilder.buildFConstant(WideTy, *ConstantFP::get(Ctx, Val));
MIRBuilder.buildFPTrunc(MI.getOperand(0).getReg(), Cst);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_BRCOND:
widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ANYEXT);
MIRBuilder.recordInsertion(&MI);
case TargetOpcode::G_BRCOND: {
unsigned TstExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildAnyExt(TstExt, MI.getOperand(0).getReg());
MIRBuilder.buildBrCond(TstExt, *MI.getOperand(1).getMBB());
MI.eraseFromParent();
return Legalized;
case TargetOpcode::G_FCMP:
if (TypeIdx == 0)
widenScalarDst(MI, WideTy);
else {
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT);
widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT);
}
case TargetOpcode::G_FCMP: {
unsigned Op0Ext, Op1Ext, DstReg;
unsigned Cmp1 = MI.getOperand(2).getReg();
unsigned Cmp2 = MI.getOperand(3).getReg();
if (TypeIdx == 0) {
Op0Ext = Cmp1;
Op1Ext = Cmp2;
DstReg = MRI.createGenericVirtualRegister(WideTy);
} else {
Op0Ext = MRI.createGenericVirtualRegister(WideTy);
Op1Ext = MRI.createGenericVirtualRegister(WideTy);
DstReg = MI.getOperand(0).getReg();
MIRBuilder.buildInstr(TargetOpcode::G_FPEXT, Op0Ext, Cmp1);
MIRBuilder.buildInstr(TargetOpcode::G_FPEXT, Op1Ext, Cmp2);
}
MIRBuilder.recordInsertion(&MI);
return Legalized;
case TargetOpcode::G_ICMP:
MIRBuilder.buildFCmp(
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
DstReg, Op0Ext, Op1Ext);
if (TypeIdx == 0)
widenScalarDst(MI, WideTy);
else {
unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>(
MI.getOperand(1).getPredicate()))
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;
widenScalarSrc(MI, WideTy, 2, ExtOpcode);
widenScalarSrc(MI, WideTy, 3, ExtOpcode);
}
MIRBuilder.recordInsertion(&MI);
MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, MI.getOperand(0).getReg(),
DstReg);
MI.eraseFromParent();
return Legalized;
case TargetOpcode::G_GEP:
}
case TargetOpcode::G_ICMP: {
bool IsSigned = CmpInst::isSigned(
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()));
unsigned Cmp1 = MI.getOperand(2).getReg();
unsigned Cmp2 = MI.getOperand(3).getReg();
unsigned Op0Ext, Op1Ext, DstReg;
if (TypeIdx == 0) {
Op0Ext = Cmp1;
Op1Ext = Cmp2;
DstReg = MRI.createGenericVirtualRegister(WideTy);
} else {
Op0Ext = MRI.createGenericVirtualRegister(WideTy);
Op1Ext = MRI.createGenericVirtualRegister(WideTy);
DstReg = MI.getOperand(0).getReg();
if (IsSigned) {
MIRBuilder.buildSExt(Op0Ext, Cmp1);
MIRBuilder.buildSExt(Op1Ext, Cmp2);
} else {
MIRBuilder.buildZExt(Op0Ext, Cmp1);
MIRBuilder.buildZExt(Op1Ext, Cmp2);
}
}
MIRBuilder.buildICmp(
static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()),
DstReg, Op0Ext, Op1Ext);
if (TypeIdx == 0)
MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, MI.getOperand(0).getReg(),
DstReg);
MI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_GEP: {
assert(TypeIdx == 1 && "unable to legalize pointer of GEP");
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT);
MIRBuilder.recordInsertion(&MI);
unsigned OffsetExt = MRI.createGenericVirtualRegister(WideTy);
MIRBuilder.buildSExt(OffsetExt, MI.getOperand(2).getReg());
MI.getOperand(2).setReg(OffsetExt);
return Legalized;
}
case TargetOpcode::G_PHI: {
assert(TypeIdx == 0 && "Expecting only Idx 0");
for (unsigned I = 1; I < MI.getNumOperands(); I += 2) {
MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());
widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT);
auto getExtendedReg = [&](unsigned Reg, MachineBasicBlock &MBB) {
auto FirstTermIt = MBB.getFirstTerminator();
MIRBuilder.setInsertPt(MBB, FirstTermIt);
MachineInstr *DefMI = MRI.getVRegDef(Reg);
MachineInstrBuilder MIB;
if (DefMI->getOpcode() == TargetOpcode::G_TRUNC)
MIB = MIRBuilder.buildAnyExtOrTrunc(WideTy,
DefMI->getOperand(1).getReg());
else
MIB = MIRBuilder.buildAnyExt(WideTy, Reg);
return MIB->getOperand(0).getReg();
};
auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, WideTy);
for (auto OpIt = MI.operands_begin() + 1, OpE = MI.operands_end();
OpIt != OpE;) {
unsigned Reg = OpIt++->getReg();
MachineBasicBlock *OpMBB = OpIt++->getMBB();
MIB.addReg(getExtendedReg(Reg, *OpMBB));
MIB.addMBB(OpMBB);
}
MachineBasicBlock &MBB = *MI.getParent();
MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI());
widenScalarDst(MI, WideTy);
MIRBuilder.recordInsertion(&MI);
auto *MBB = MI.getParent();
MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
MIRBuilder.buildTrunc(MI.getOperand(0).getReg(),
MIB->getOperand(0).getReg());
MI.eraseFromParent();
return Legalized;
}
}
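
One detail from the G_STORE case above is worth calling out on its own: when an s1 value has to be widened so it can be stored, the choice of extension depends on how the target materializes booleans, which both the removed and the restored code query through getBooleanContents. A compact sketch of that decision; the free function and its name are illustrative, not part of the commit.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
using namespace llvm;

// Pick the extension opcode for widening a boolean (s1) before storing it,
// based on the target's boolean representation.
static unsigned pickBoolStoreExt(const TargetLoweringBase &TLI) {
  switch (TLI.getBooleanContents(/*isVec=*/false, /*isFloat=*/false)) {
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;   // booleans are 0 / 1
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;   // booleans are 0 / -1
  default:
    return TargetOpcode::G_ANYEXT; // high bits are unspecified
  }
}

The G_ICMP case makes the analogous choice from the predicate instead: CmpInst::isSigned selects G_SEXT for the compared operands, otherwise G_ZEXT.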


@@ -53,11 +53,6 @@ void MachineIRBuilderBase::setInsertPt(MachineBasicBlock &MBB,
State.II = II;
}
void MachineIRBuilderBase::recordInsertion(MachineInstr *InsertedInstr) const {
if (State.InsertedInstr)
State.InsertedInstr(InsertedInstr);
}
void MachineIRBuilderBase::recordInsertions(
std::function<void(MachineInstr *)> Inserted) {
State.InsertedInstr = std::move(Inserted);
@@ -82,7 +77,8 @@ MachineInstrBuilder MachineIRBuilderBase::buildInstrNoInsert(unsigned Opcode) {
MachineInstrBuilder MachineIRBuilderBase::insertInstr(MachineInstrBuilder MIB) {
getMBB().insert(getInsertPt(), MIB);
recordInsertion(MIB);
if (State.InsertedInstr)
State.InsertedInstr(MIB);
return MIB;
}
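
After this change, insertInstr notifies the InsertedInstr observer directly instead of going through the removed recordInsertion helper. For orientation, a minimal sketch of the build-detached-then-insert flow that funnels through insertInstr; the function name and the G_IMPLICIT_DEF example are illustrative, not from this commit.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
using namespace llvm;

// Build an instruction without inserting it, then place it at the current
// insertion point; per the hunk above, insertInstr is where the observer
// callback fires.
static void buildDetachedThenInsert(MachineIRBuilder &MIRBuilder,
                                    MachineRegisterInfo &MRI) {
  unsigned Tmp = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MachineInstrBuilder MIB =
      MIRBuilder.buildInstrNoInsert(TargetOpcode::G_IMPLICIT_DEF);
  MIB.addDef(Tmp);
  MIRBuilder.insertInstr(MIB);
}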


@@ -1,3 +1,4 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -verify-machineinstrs -run-pass=legalizer %s -o - | FileCheck %s
--- |
; ModuleID = '/tmp/test.ll'
@@ -295,7 +296,7 @@ body: |
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
; CHECK: bb.1:
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.0, [[TRUNC3:%[0-9]+]](s16), %bb.1
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.0, %14(s16), %bb.1
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT]], [[COPY1]]
@@ -305,7 +306,7 @@ body: |
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[COPY]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[TRUNC3]]:_(s16) = G_TRUNC [[ADD]](s32)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
; CHECK: G_BRCOND [[TRUNC2]](s1), %bb.1
; CHECK: bb.2:
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
@@ -362,14 +363,14 @@ body: |
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
; CHECK: bb.1:
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.0, [[COPY1:%[0-9]+]](s16), %bb.1
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.0, %8(s16), %bb.1
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[PHI]](s16)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[COPY]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[COPY1]]:_(s16) = COPY [[PHI]](s16)
; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY [[PHI]](s16)
; CHECK: G_BRCOND [[TRUNC2]](s1), %bb.1
; CHECK: bb.2:
; CHECK: $w0 = COPY [[AND]](s32)
@@ -455,8 +456,8 @@ body: |
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C5]]
; CHECK: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[AND1]]
; CHECK: $w0 = COPY [[ADD2]](s32)
; CHECK: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[C]]1, [[C]]2
; CHECK: $w0 = COPY [[C]]3(s32)
; CHECK: RET_ReallyLR implicit $w0
bb.0:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
@@ -531,7 +532,6 @@ body: |
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK: liveins: $w0, $w1
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -546,7 +546,7 @@ body: |
; CHECK: G_BR %bb.2
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC2]](s16), %bb.0, [[TRUNC5:%[0-9]+]](s16), %bb.1
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC2]](s16), %bb.0, [[C]]2(s16), %bb.1
; CHECK: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[PHI]](s16)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
@@ -554,15 +554,15 @@ body: |
; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[C2]]
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[ADD1]](s32), [[C3]]
; CHECK: [[TRUNC4:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s16) = COPY [[PHI]](s16)
; CHECK: [[TRUNC5]]:_(s16) = G_TRUNC [[C4]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY [[PHI]](s16)
; CHECK: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[C4]](s32)
; CHECK: G_BRCOND [[TRUNC4]](s1), %bb.2
; CHECK: G_BR %bb.1
; CHECK: bb.2:
; CHECK: [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[COPY2]](s16), %bb.1, [[TRUNC1]](s16), %bb.0
; CHECK: [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[COPY1]](s16), %bb.1, [[TRUNC1]](s16), %bb.0
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C6]]
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[C]]8, [[C]]7
; CHECK: $w0 = COPY [[AND1]](s32)
; CHECK: RET_ReallyLR implicit $w0
bb.0: