
[X86] X86::CMOV to Branch heuristic based optimization.

The LLVM compiler recognizes opportunities to transform a branch into IR select instruction(s), which are later lowered into an X86::CMOV instruction, assuming no other optimization eliminated the SelectInst.
However, it is not always profitable to emit an X86::CMOV instruction. For example, a branch is preferable to an X86::CMOV instruction when:
1. The branch is well predicted.
2. The condition operand is expensive compared to the True-value and False-value operands.

The CodeGenPrepare pass contains a shallow optimization that tries to convert a SelectInst into a branch, but it is not sufficient.
This commit implements a machine optimization pass that converts X86::CMOV instruction(s) into branches, based on a conservative heuristic.
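
To illustrate the second condition, here is a hypothetical C++ sketch (not code from this patch) of a loop where a branch beats a CMOV: the condition needs an expensive division while both select values are cheap.

    void scale(int n, int a, int b, int *c) {
      for (int i = 0; i < n; ++i) {
        int t = c[i];          // cheap false-value
        if (c[i] / a > b)      // expensive condition (division)
          t = 10;              // cheap true-value
        c[i] = t;              // without this pass: t = CMOV(cond, 10, c[i])
      }
    }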

Differential Revision: https://reviews.llvm.org/D34769

llvm-svn: 308142
Amjad Aboud 2017-07-16 17:39:56 +00:00
parent c153f3743a
commit dec4b2327f
8 changed files with 998 additions and 21 deletions


@@ -37,6 +37,7 @@ endif()
set(sources
X86AsmPrinter.cpp
X86CallFrameOptimization.cpp
X86CmovConversion.cpp
X86ExpandPseudo.cpp
X86FastISel.cpp
X86FixupBWInsts.cpp


@@ -83,6 +83,9 @@ FunctionPass *createX86WinEHStatePass();
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();
/// This pass converts X86 cmov instructions into branches when profitable.
FunctionPass *createX86CmovConverterPass();
/// Return a Machine IR pass that selectively replaces
/// certain byte and word instructions by equivalent 32 bit instructions,
/// in order to eliminate partial register usage, false dependences on


@@ -0,0 +1,611 @@
//===-- X86CmovConversion.cpp - Convert Cmov to Branch --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. This pass is conservative, i.e., it applies the
/// transformation if and only if it can guarantee a gain with high confidence.
///
/// Thus, the optimization applies under the following conditions:
/// 1. Consider as candidates only CMOVs in innermost loops, assuming that
/// most hotspots are represented by these loops.
/// 2. Given a group of CMOV instructions that use the same EFLAGS def
/// instruction:
/// a. Consider them as candidates only if all have the same condition code
/// or the opposite one, to prevent generating more than one conditional
/// jump per EFLAGS def instruction.
/// b. Consider them as candidates only if all are profitable to be
/// converted, assuming that one bad conversion may cause a degradation.
/// 3. Apply the conversion only for loops that were found profitable and only
/// for CMOV candidates that were found profitable.
/// a. A loop is considered profitable only if the conversion reduces its
/// depth cost by some threshold.
/// b. A CMOV is considered profitable if the cost of its condition is
/// higher than the average cost of its true-value and false-value by 25%
/// of the branch-misprediction-penalty; this ensures no degradation even
/// with 25% branch misprediction.
///
/// Note: This pass is assumed to run on SSA machine code.
//===----------------------------------------------------------------------===//
//
// External interfaces:
// FunctionPass *llvm::createX86CmovConverterPass();
// bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF);
//
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "x86-cmov-converter"
STATISTIC(NumOfSkippedCmovGroups, "Number of unsupported CMOV-groups");
STATISTIC(NumOfCmovGroupCandidate, "Number of CMOV-group candidates");
STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");
namespace {
// This internal switch can be used to turn off the cmov/branch optimization.
static cl::opt<bool>
EnableCmovConverter("x86-cmov-converter",
cl::desc("Enable the X86 cmov-to-branch optimization."),
cl::init(true), cl::Hidden);
/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
X86CmovConverterPass() : MachineFunctionPass(ID) {}
~X86CmovConverterPass() {}
StringRef getPassName() const override { return "X86 cmov Conversion"; }
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
/// Pass identification, replacement for typeid.
static char ID;
const MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
TargetSchedModel TSchedModel;
/// List of consecutive CMOV instructions.
typedef SmallVector<MachineInstr *, 2> CmovGroup;
typedef SmallVector<CmovGroup, 2> CmovGroups;
/// Collect all CMOV-group-candidates in \p CurrLoop and update \p
/// CmovInstGroups accordingly.
///
/// \param CurrLoop Loop being processed.
/// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
/// \returns true iff it found any CMOV-group-candidate.
bool collectCmovCandidates(MachineLoop *CurrLoop, CmovGroups &CmovInstGroups);
/// Check if it is profitable to transform each CMOV-group-candidates into
/// branch. Remove all groups that are not profitable from \p CmovInstGroups.
///
/// \param CurrLoop Loop being processed.
/// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
/// \returns true iff any CMOV-group-candidate remains.
bool checkForProfitableCmovCandidates(MachineLoop *CurrLoop,
CmovGroups &CmovInstGroups);
/// Convert the given list of consecutive CMOV instructions into a branch.
///
/// \param Group Consecutive CMOV instructions to be converted into branch.
void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};
char X86CmovConverterPass::ID = 0;
void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<MachineLoopInfo>();
}
bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(*MF.getFunction()))
return false;
if (!EnableCmovConverter)
return false;
DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
<< "**********\n");
bool Changed = false;
MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
const TargetSubtargetInfo &STI = MF.getSubtarget();
MRI = &MF.getRegInfo();
TII = STI.getInstrInfo();
TSchedModel.init(STI.getSchedModel(), &STI, TII);
//===--------------------------------------------------------------------===//
// Algorithm
// ---------
// For each inner most loop
// collectCmovCandidates() {
// Find all CMOV-group-candidates.
// }
//
// checkForProfitableCmovCandidates() {
// * Calculate both loop-depth and optimized-loop-depth.
// * Use these depth to check for loop transformation profitability.
// * Check for CMOV-group-candidate transformation profitability.
// }
//
// For each profitable CMOV-group-candidate
// convertCmovInstsToBranches() {
// * Create FalseBB, SinkBB, Conditional branch to SinkBB.
// * Replace each CMOV instruction with a PHI instruction in SinkBB.
// }
//
// Note: For more details, see each function description.
//===--------------------------------------------------------------------===//
for (MachineBasicBlock &MBB : MF) {
MachineLoop *CurrLoop = MLI.getLoopFor(&MBB);
// Optimize only innermost loops.
if (!CurrLoop || CurrLoop->getHeader() != &MBB ||
!CurrLoop->getSubLoops().empty())
continue;
// List of consecutive CMOV instructions to be processed.
CmovGroups CmovInstGroups;
if (!collectCmovCandidates(CurrLoop, CmovInstGroups))
continue;
if (!checkForProfitableCmovCandidates(CurrLoop, CmovInstGroups))
continue;
Changed = true;
for (auto &Group : CmovInstGroups)
convertCmovInstsToBranches(Group);
}
return Changed;
}
bool X86CmovConverterPass::collectCmovCandidates(MachineLoop *CurrLoop,
CmovGroups &CmovInstGroups) {
//===--------------------------------------------------------------------===//
// Collect all CMOV-group-candidates and add them into CmovInstGroups.
//
// CMOV-group:
// CMOV instructions, in the same MBB, that use the same EFLAGS def
// instruction.
//
// CMOV-group-candidate:
// CMOV-group whose CMOV instructions
// 1. are consecutive,
// 2. have the same condition code or the opposite one, and
// 3. have only register operands (X86::CMOVrr).
//===--------------------------------------------------------------------===//
// List of possible improvement (TODO's):
// --------------------------------------
// TODO: Add support for X86::CMOVrm instructions.
// TODO: Add support for X86::SETcc instructions.
// TODO: Add support for CMOV-groups with non-consecutive CMOV instructions.
//===--------------------------------------------------------------------===//
// Current processed CMOV-Group.
CmovGroup Group;
for (auto *MBB : CurrLoop->getBlocks()) {
Group.clear();
// Condition code of the first CMOV instruction in the current processed
// range, and its opposite condition code.
X86::CondCode FirstCC, FirstOppCC;
// Indicator of a non-CMOVrr instruction in the current processed range.
bool FoundNonCMOVInst = false;
// Indicator for whether the current processed CMOV-group should be skipped.
bool SkipGroup = false;
for (auto &I : *MBB) {
X86::CondCode CC = X86::getCondFromCMovOpc(I.getOpcode());
// Check if we found an X86::CMOVrr instruction.
if (CC != X86::COND_INVALID && !I.mayLoad()) {
if (Group.empty()) {
// We found the first CMOV in the range; reset the flags.
FirstCC = CC;
FirstOppCC = X86::GetOppositeBranchCondition(CC);
FoundNonCMOVInst = false;
SkipGroup = false;
}
Group.push_back(&I);
// Check if it is a non-consecutive CMOV instruction or it has a different
// condition code than FirstCC or FirstOppCC.
if (FoundNonCMOVInst || (CC != FirstCC && CC != FirstOppCC))
// Mark the SkipGroup indicator to skip the current processed CMOV-Group.
SkipGroup = true;
continue;
}
// If Group is empty, keep looking for the first CMOV in the range.
if (Group.empty())
continue;
// We found a non-X86::CMOVrr instruction.
FoundNonCMOVInst = true;
// Check if this instruction defines EFLAGS to determine the end of the
// processed range, as there would be no more instructions using the current
// EFLAGS def.
if (I.definesRegister(X86::EFLAGS)) {
// Check whether the current processed CMOV-group should be skipped;
// if not, add it as a CMOV-group-candidate.
if (!SkipGroup)
CmovInstGroups.push_back(Group);
else
++NumOfSkippedCmovGroups;
Group.clear();
}
}
// The end of the basic block is considered the end of the range; check
// whether the current processed CMOV-group should be skipped, and if not,
// add it as a CMOV-group-candidate.
if (Group.empty())
continue;
if (!SkipGroup)
CmovInstGroups.push_back(Group);
else
++NumOfSkippedCmovGroups;
}
NumOfCmovGroupCandidate += CmovInstGroups.size();
return !CmovInstGroups.empty();
}
/// \returns Depth of the CMOV instruction as if it were converted into a
/// branch.
/// \param TrueOpDepth depth cost of the CMOV true-value operand.
/// \param FalseOpDepth depth cost of the CMOV false-value operand.
static unsigned getDepthOfOptCmov(unsigned TrueOpDepth, unsigned FalseOpDepth) {
//===--------------------------------------------------------------------===//
// With no info about branch weight, we assume 50% for each value operand.
// Thus, the depth of an optimized CMOV instruction is the rounded-up average
// of its True-Operand-Value-Depth and False-Operand-Value-Depth.
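// For example, TrueOpDepth = 2 and FalseOpDepth = 5 give (2 + 5 + 1) / 2 = 4.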
//===--------------------------------------------------------------------===//
return (TrueOpDepth + FalseOpDepth + 1) / 2;
}
bool X86CmovConverterPass::checkForProfitableCmovCandidates(
MachineLoop *CurrLoop, CmovGroups &CmovInstGroups) {
struct DepthInfo {
/// Depth of original loop.
unsigned Depth;
/// Depth of optimized loop.
unsigned OptDepth;
};
/// Number of loop iterations for which instruction depth is calculated.
static const unsigned LoopIterations = 2;
DenseMap<MachineInstr *, DepthInfo> DepthMap;
DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
/// For each register type, maps a register to its last def instruction.
DenseMap<unsigned, MachineInstr *> RegDefMaps[RegTypeNum];
/// Maps register operand to its def instruction, which can be nullptr if it
/// is unknown (e.g., operand is defined outside the loop).
DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;
// Set depth of unknown instruction (i.e., nullptr) to zero.
DepthMap[nullptr] = {0, 0};
SmallPtrSet<MachineInstr *, 4> CmovInstructions;
for (auto &Group : CmovInstGroups)
CmovInstructions.insert(Group.begin(), Group.end());
//===--------------------------------------------------------------------===//
// Step 1: Calculate instruction depth and loop depth.
// Optimized-Loop:
// loop with CMOV-group-candidates converted into branches.
//
// Instruction-Depth:
// instruction latency + max operand depth.
// * For a CMOV instruction in the optimized loop, the depth is calculated as:
// CMOV latency + getDepthOfOptCmov(True-Op-Depth, False-Op-Depth)
// TODO: Find a better way to estimate the latency of the branch instruction
// rather than using the CMOV latency.
//
// Loop-Depth:
// max instruction depth of all instructions in the loop.
// Note: instruction with max depth represents the critical-path in the loop.
//
// Loop-Depth[i]:
// Loop-Depth calculated for the first `i` iterations.
// Note: it is enough to calculate depth for up to two iterations.
//
// Depth-Diff[i]:
// Number of cycles saved in the first `i` iterations by optimizing the loop.
//===--------------------------------------------------------------------===//
for (unsigned I = 0; I < LoopIterations; ++I) {
DepthInfo &MaxDepth = LoopDepth[I];
for (auto *MBB : CurrLoop->getBlocks()) {
// Clear physical registers Def map.
RegDefMaps[PhyRegType].clear();
for (MachineInstr &MI : *MBB) {
unsigned MIDepth = 0;
unsigned MIDepthOpt = 0;
bool IsCMOV = CmovInstructions.count(&MI);
for (auto &MO : MI.uses()) {
// Checks for "isUse()" as "uses()" returns also implicit definitions.
if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
auto &RDM = RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)];
if (MachineInstr *DefMI = RDM.lookup(Reg)) {
OperandToDefMap[&MO] = DefMI;
DepthInfo Info = DepthMap.lookup(DefMI);
MIDepth = std::max(MIDepth, Info.Depth);
if (!IsCMOV)
MIDepthOpt = std::max(MIDepthOpt, Info.OptDepth);
}
}
if (IsCMOV)
MIDepthOpt = getDepthOfOptCmov(
DepthMap[OperandToDefMap.lookup(&MI.getOperand(1))].OptDepth,
DepthMap[OperandToDefMap.lookup(&MI.getOperand(2))].OptDepth);
// Iterate over all operands to handle implicit definitions as well.
for (auto &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
RegDefMaps[TargetRegisterInfo::isVirtualRegister(Reg)][Reg] = &MI;
}
unsigned Latency = TSchedModel.computeInstrLatency(&MI);
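// Record both depths: max operand depth plus this instruction's own latency.
// The braced initializer below updates MIDepth and MIDepthOpt in place.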
DepthMap[&MI] = {MIDepth += Latency, MIDepthOpt += Latency};
MaxDepth.Depth = std::max(MaxDepth.Depth, MIDepth);
MaxDepth.OptDepth = std::max(MaxDepth.OptDepth, MIDepthOpt);
}
}
}
unsigned Diff[LoopIterations] = {LoopDepth[0].Depth - LoopDepth[0].OptDepth,
LoopDepth[1].Depth - LoopDepth[1].OptDepth};
//===--------------------------------------------------------------------===//
// Step 2: Check if the loop is worth optimizing.
// Worth-Optimize-Loop:
// case 1: Diff[1] == Diff[0]
// The critical-path is iteration independent - there is no dependency
// of critical-path instructions on critical-path instructions of the
// previous iteration.
// Thus, it is enough to check the gain percent of the 1st iteration -
// to be conservative, the optimized loop needs a depth at least 12.5%
// lower than the original loop, per iteration.
//
// case 2: Diff[1] > Diff[0]
// The critical-path is iteration dependent - there is a dependency of
// critical-path instructions on critical-path instructions of the
// previous iteration.
// Thus, it is required to check the gradient of the gain - the change
// in Depth-Diff compared to the change in Loop-Depth between the 1st
// and 2nd iterations.
// To be conservative, the gradient needs to be at least 50%.
//
// If the loop is not worth optimizing, remove all CMOV-group-candidates.
//===--------------------------------------------------------------------===//
bool WorthOptLoop = false;
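// Below, `Diff[0] * 8 >= LoopDepth[0].Depth` encodes the 12.5% per-iteration
// gain threshold, and `(Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth -
// LoopDepth[0].Depth)` encodes the 50% gain-gradient threshold.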
if (Diff[1] == Diff[0])
WorthOptLoop = Diff[0] * 8 >= LoopDepth[0].Depth;
else if (Diff[1] > Diff[0])
WorthOptLoop =
(Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth - LoopDepth[0].Depth);
if (!WorthOptLoop)
return false;
++NumOfLoopCandidate;
//===--------------------------------------------------------------------===//
// Step 3: Check for each CMOV-group-candidate whether it is worth optimizing.
// Worth-Optimize-Group:
// Iff it is worth optimizing all CMOV instructions in the group.
//
// Worth-Optimize-CMOV:
// A predicted branch is faster than a CMOV by the difference between the
// depth of the condition operand and the depth of the taken (predicted)
// value operand.
// To be conservative, the gain of such a CMOV transformation should cover
// at least 25% of the branch-misprediction-penalty.
//===--------------------------------------------------------------------===//
unsigned MispredictPenalty = TSchedModel.getMCSchedModel()->MispredictPenalty;
CmovGroups TempGroups;
std::swap(TempGroups, CmovInstGroups);
for (auto &Group : TempGroups) {
bool WorthOpGroup = true;
for (auto *MI : Group) {
// Avoid CMOV instructions whose value is used as a pointer to load from.
// This is another conservative check, to avoid converting a CMOV instruction
// used in a tree-search-like algorithm, where the branch is hard to predict.
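// The two iterator checks below test that the CMOV result register has
// exactly one use: `UIs.begin() != UIs.end()` means at least one use, and
// `++UIs.begin() == UIs.end()` means no second use.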
auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
if (UIs.begin() != UIs.end() && ++UIs.begin() == UIs.end()) {
unsigned Op = UIs.begin()->getOpcode();
if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
WorthOpGroup = false;
break;
}
}
unsigned CondCost =
DepthMap[OperandToDefMap.lookup(&MI->getOperand(3))].Depth;
unsigned ValCost = getDepthOfOptCmov(
DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
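// Reject the group unless CondCost - ValCost >= MispredictPenalty / 4, i.e.,
// unless the depth saved by taking the predicted value covers at least 25%
// of the branch-misprediction penalty.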
if (ValCost > CondCost || (CondCost - ValCost) * 4 < MispredictPenalty) {
WorthOpGroup = false;
break;
}
}
if (WorthOpGroup)
CmovInstGroups.push_back(Group);
}
return !CmovInstGroups.empty();
}
static bool checkEFLAGSLive(MachineInstr *MI) {
if (MI->killsRegister(X86::EFLAGS))
return false;
// The EFLAGS operand of MI might be missing a kill marker.
// Figure out whether the EFLAGS register is live after the MI instruction.
MachineBasicBlock *BB = MI->getParent();
MachineBasicBlock::iterator ItrMI = MI;
// Scan forward through BB for a use/def of EFLAGS.
for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
if (I->readsRegister(X86::EFLAGS))
return true;
if (I->definesRegister(X86::EFLAGS))
return false;
}
// We hit the end of the block, check whether EFLAGS is live into a successor.
for (auto I = BB->succ_begin(), E = BB->succ_end(); I != E; ++I) {
if ((*I)->isLiveIn(X86::EFLAGS))
return true;
}
return false;
}
void X86CmovConverterPass::convertCmovInstsToBranches(
SmallVectorImpl<MachineInstr *> &Group) const {
assert(!Group.empty() && "No CMOV instructions to convert");
++NumOfOptimizedCmovGroups;
// To convert a CMOVcc instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
// to set, the condition code register to branch on, the true/false values to
// select between, and a branch opcode to use.
// Before
// -----
// MBB:
// cond = cmp ...
// v1 = CMOVge t1, f1, cond
// v2 = CMOVlt t2, f2, cond
// v3 = CMOVge v1, f3, cond
//
// After
// -----
// MBB:
// cond = cmp ...
// jge %SinkMBB
//
// FalseMBB:
// jmp %SinkMBB
//
// SinkMBB:
// %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
// %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
// ; true-value with false-value
// %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
// ; previous Phi instruction result
MachineInstr &MI = *Group.front();
MachineInstr *LastCMOV = Group.back();
DebugLoc DL = MI.getDebugLoc();
X86::CondCode CC = X86::CondCode(X86::getCondFromCMovOpc(MI.getOpcode()));
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineBasicBlock *MBB = MI.getParent();
MachineFunction::iterator It = ++MBB->getIterator();
MachineFunction *F = MBB->getParent();
const BasicBlock *BB = MBB->getBasicBlock();
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
F->insert(It, FalseMBB);
F->insert(It, SinkMBB);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the false and sink blocks.
if (checkEFLAGSLive(LastCMOV)) {
FalseMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of MBB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// Add the false and sink blocks as its successors.
MBB->addSuccessor(FalseMBB);
MBB->addSuccessor(SinkMBB);
// Create the conditional branch instruction.
BuildMI(MBB, DL, TII->get(X86::GetCondBranchFromCond(CC))).addMBB(SinkMBB);
// Add the sink block to the false block successors.
FalseMBB->addSuccessor(SinkMBB);
MachineInstrBuilder MIB;
MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator MIItEnd =
std::next(MachineBasicBlock::iterator(LastCMOV));
MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// As we are creating the PHIs, we have to be careful if there is more than
// one. Later CMOVs may reference the results of earlier CMOVs, but later
// PHIs have to reference the individual true/false inputs from earlier PHIs.
// That also means that PHI construction must work forward from earlier to
// later, and that the code must maintain a mapping from earlier PHIs'
// destination registers to the registers that went into the PHI.
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
unsigned DestReg = MIIt->getOperand(0).getReg();
unsigned Op1Reg = MIIt->getOperand(1).getReg();
unsigned Op2Reg = MIIt->getOperand(2).getReg();
// If the CMOV we are processing has the opposite condition from the jump we
// generated, then we have to swap the operands for the PHI that is going to
// be generated.
if (X86::getCondFromCMovOpc(MIIt->getOpcode()) == OppCC)
std::swap(Op1Reg, Op2Reg);
auto Op1Itr = RegRewriteTable.find(Op1Reg);
if (Op1Itr != RegRewriteTable.end())
Op1Reg = Op1Itr->second.first;
auto Op2Itr = RegRewriteTable.find(Op2Reg);
if (Op2Itr != RegRewriteTable.end())
Op2Reg = Op2Itr->second.second;
// SinkMBB:
// %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
// ...
MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
.addReg(Op1Reg)
.addMBB(FalseMBB)
.addReg(Op2Reg)
.addMBB(MBB);
(void)MIB;
DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
DEBUG(dbgs() << "\tTo: "; MIB->dump());
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
}
// Now remove the CMOV(s).
MBB->erase(MIItBegin, MIItEnd);
}
} // End anonymous namespace.
FunctionPass *llvm::createX86CmovConverterPass() {
return new X86CmovConverterPass();
}


@@ -375,6 +375,7 @@ bool X86PassConfig::addILPOpts() {
addPass(&EarlyIfConverterID);
if (EnableMachineCombinerPass)
addPass(&MachineCombinerID);
addPass(createX86CmovConverterPass());
return true;
}


@@ -1,4 +1,4 @@
; RUN: llc < %s -march=x86 -mattr=+cmov | FileCheck %s
; RUN: llc < %s -march=x86 -mattr=+cmov -x86-cmov-converter=false | FileCheck %s
;
; Test scheduling a multi-use compare. We should neither spill flags
; nor clone the compare.


@@ -9,32 +9,32 @@ define void @atomic_maxmin_i6432() {
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
; LINUX: cmovne
; LINUX: cmovne
; LINUX: jne
; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%2 = atomicrmw min i64* @sc64, i64 6 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
; LINUX: cmovne
; LINUX: cmovne
; LINUX: jne
; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%3 = atomicrmw umax i64* @sc64, i64 7 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
; LINUX: cmovne
; LINUX: cmovne
; LINUX: jne
; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
%4 = atomicrmw umin i64* @sc64, i64 8 acquire
; LINUX: [[LABEL:.LBB[0-9]+_[0-9]+]]
; LINUX: cmpl
; LINUX: sbbl
; LINUX: cmovne
; LINUX: cmovne
; LINUX: jne
; LINUX: jne
; LINUX: lock cmpxchg8b
; LINUX: jne [[LABEL]]
ret void


@@ -167,14 +167,24 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setge %cl
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: movq %rax, %rbx
; CHECK-NEXT: jne LBB5_3
; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
; CHECK-NEXT: cmovneq %rax, %rbx
; CHECK-NEXT: LBB5_3: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: jne LBB5_5
; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
; CHECK-NEXT: cmovneq %rdx, %rcx
; CHECK-NEXT: LBB5_5: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB5_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB5_1
; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -203,14 +213,24 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %r8, %rcx
; CHECK-NEXT: setge %cl
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: movq %rax, %rbx
; CHECK-NEXT: jne LBB6_3
; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
; CHECK-NEXT: cmovneq %rax, %rbx
; CHECK-NEXT: LBB6_3: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: jne LBB6_5
; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
; CHECK-NEXT: cmovneq %rdx, %rcx
; CHECK-NEXT: LBB6_5: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB6_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB6_1
; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -239,14 +259,24 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setae %cl
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: movq %rax, %rbx
; CHECK-NEXT: jne LBB7_3
; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
; CHECK-NEXT: cmovneq %rax, %rbx
; CHECK-NEXT: LBB7_3: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: jne LBB7_5
; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
; CHECK-NEXT: cmovneq %rdx, %rcx
; CHECK-NEXT: LBB7_5: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB7_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB7_1
; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx
@@ -275,14 +305,24 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
; CHECK-NEXT: sbbq %rdx, %rcx
; CHECK-NEXT: setb %cl
; CHECK-NEXT: andb $1, %cl
; CHECK-NEXT: movq %rax, %rbx
; CHECK-NEXT: jne LBB8_3
; CHECK-NEXT: ## BB#2: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: movq %rsi, %rbx
; CHECK-NEXT: cmovneq %rax, %rbx
; CHECK-NEXT: LBB8_3: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: movq %rdx, %rcx
; CHECK-NEXT: jne LBB8_5
; CHECK-NEXT: ## BB#4: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: movq %r8, %rcx
; CHECK-NEXT: cmovneq %rdx, %rcx
; CHECK-NEXT: LBB8_5: ## %atomicrmw.start
; CHECK-NEXT: ## in Loop: Header=BB8_1 Depth=1
; CHECK-NEXT: lock cmpxchg16b (%rdi)
; CHECK-NEXT: jne LBB8_1
; CHECK-NEXT: ## BB#2: ## %atomicrmw.end
; CHECK-NEXT: ## BB#6: ## %atomicrmw.end
; CHECK-NEXT: movq %rax, {{.*}}(%rip)
; CHECK-NEXT: movq %rdx, _var+{{.*}}(%rip)
; CHECK-NEXT: popq %rbx


@@ -0,0 +1,321 @@
; RUN: llc -mtriple=x86_64-pc-linux -x86-cmov-converter=true -verify-machineinstrs < %s | FileCheck %s
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; This test checks that the x86-cmov-converter optimization transforms
;; CMOV instructions into branches when it is profitable.
;; There are 5 cases below:
;; 1. CmovInHotPath:
;; CMOV depends on the condition and it is in the hot path.
;; Thus, it is worth transforming.
;;
;; 2. CmovNotInHotPath:
;; A test similar to (1), except that the CMOV is not in the hot path.
;; Thus, it is not worth transforming.
;;
;; 3. MaxIndex:
;; A maximum-calculation algorithm that looks for the max index;
;; calculating the CMOV value is cheaper than calculating the CMOV condition.
;; Thus, it is worth transforming.
;;
;; 4. MaxValue:
;; A maximum-calculation algorithm that looks for the max value;
;; calculating the CMOV value is not cheaper than calculating the CMOV
;; condition. Thus, it is not worth transforming.
;;
;; 5. BinarySearch:
;; Usually, the CMOV in a binary search is not predictable.
;; Thus, it is not worth transforming.
;;
;; Test was created using the following command line:
;; > clang -S -O2 -m64 -fno-vectorize -fno-unroll-loops -emit-llvm foo.c -o -
;; Where foo.c is:
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;void CmovInHotPath(int n, int a, int b, int *c, int *d) {
;; for (int i = 0; i < n; i++) {
;; int t = c[i];
;; if (c[i] * a > b)
;; t = 10;
;; c[i] = t;
;; }
;;}
;;
;;
;;void CmovNotInHotPath(int n, int a, int b, int *c, int *d) {
;; for (int i = 0; i < n; i++) {
;; int t = c[i];
;; if (c[i] * a > b)
;; t = 10;
;; c[i] = t;
;; d[i] /= b;
;; }
;;}
;;
;;
;;int MaxIndex(int n, int *a) {
;; int t = 0;
;; for (int i = 1; i < n; i++) {
;; if (a[i] > a[t])
;; t = i;
;; }
;; return a[t];
;;}
;;
;;
;;int MaxValue(int n, int *a) {
;; int t = a[0];
;; for (int i = 1; i < n; i++) {
;; if (a[i] > t)
;; t = a[i];
;; }
;; return t;
;;}
;;
;;typedef struct Node Node;
;;struct Node {
;; unsigned Val;
;; Node *Right;
;; Node *Left;
;;};
;;
;;unsigned BinarySearch(unsigned Mask, Node *Curr, Node *Next) {
;; while (Curr->Val > Next->Val) {
;; Curr = Next;
;; if (Mask & (0x1 << Curr->Val))
;; Next = Curr->Right;
;; else
;; Next = Curr->Left;
;; }
;; return Curr->Val;
;;}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%struct.Node = type { i32, %struct.Node*, %struct.Node* }
; CHECK-LABEL: CmovInHotPath
; CHECK-NOT: cmov
; CHECK: jg
define void @CmovInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture readnone %d) #0 {
entry:
%cmp14 = icmp sgt i32 %n, 0
br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
for.body.preheader: ; preds = %entry
%wide.trip.count = zext i32 %n to i64
br label %for.body
for.cond.cleanup: ; preds = %for.body, %entry
ret void
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
%0 = load i32, i32* %arrayidx, align 4
%mul = mul nsw i32 %0, %a
%cmp3 = icmp sgt i32 %mul, %b
%. = select i1 %cmp3, i32 10, i32 %0
store i32 %., i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: CmovNotInHotPath
; CHECK: cmovg
define void @CmovNotInHotPath(i32 %n, i32 %a, i32 %b, i32* nocapture %c, i32* nocapture %d) #0 {
entry:
%cmp18 = icmp sgt i32 %n, 0
br i1 %cmp18, label %for.body.preheader, label %for.cond.cleanup
for.body.preheader: ; preds = %entry
%wide.trip.count = zext i32 %n to i64
br label %for.body
for.cond.cleanup: ; preds = %for.body, %entry
ret void
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
%0 = load i32, i32* %arrayidx, align 4
%mul = mul nsw i32 %0, %a
%cmp3 = icmp sgt i32 %mul, %b
%. = select i1 %cmp3, i32 10, i32 %0
store i32 %., i32* %arrayidx, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%1 = load i32, i32* %arrayidx7, align 4
%div = sdiv i32 %1, %b
store i32 %div, i32* %arrayidx7, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: MaxIndex
; CHECK-NOT: cmov
; CHECK: jg
define i32 @MaxIndex(i32 %n, i32* nocapture readonly %a) #0 {
entry:
%cmp14 = icmp sgt i32 %n, 1
br i1 %cmp14, label %for.body.preheader, label %for.cond.cleanup
for.body.preheader: ; preds = %entry
%wide.trip.count = zext i32 %n to i64
br label %for.body
for.cond.cleanup.loopexit: ; preds = %for.body
%phitmp = sext i32 %i.0.t.0 to i64
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
%t.0.lcssa = phi i64 [ 0, %entry ], [ %phitmp, %for.cond.cleanup.loopexit ]
%arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %t.0.lcssa
%0 = load i32, i32* %arrayidx5, align 4
ret i32 %0
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
%t.015 = phi i32 [ %i.0.t.0, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32, i32* %arrayidx, align 4
%idxprom1 = sext i32 %t.015 to i64
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
%2 = load i32, i32* %arrayidx2, align 4
%cmp3 = icmp sgt i32 %1, %2
%3 = trunc i64 %indvars.iv to i32
%i.0.t.0 = select i1 %cmp3, i32 %3, i32 %t.015
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}
; CHECK-LABEL: MaxValue
; CHECK-NOT: jg
; CHECK: cmovg
define i32 @MaxValue(i32 %n, i32* nocapture readonly %a) #0 {
entry:
%0 = load i32, i32* %a, align 4
%cmp13 = icmp sgt i32 %n, 1
br i1 %cmp13, label %for.body.preheader, label %for.cond.cleanup
for.body.preheader: ; preds = %entry
%wide.trip.count = zext i32 %n to i64
br label %for.body
for.cond.cleanup: ; preds = %for.body, %entry
%t.0.lcssa = phi i32 [ %0, %entry ], [ %.t.0, %for.body ]
ret i32 %t.0.lcssa
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 1, %for.body.preheader ]
%t.014 = phi i32 [ %.t.0, %for.body ], [ %0, %for.body.preheader ]
%arrayidx1 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32, i32* %arrayidx1, align 4
%cmp2 = icmp sgt i32 %1, %t.014
%.t.0 = select i1 %cmp2, i32 %1, i32 %t.014
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
; CHECK-LABEL: BinarySearch
; CHECK: cmov
define i32 @BinarySearch(i32 %Mask, %struct.Node* nocapture readonly %Curr, %struct.Node* nocapture readonly %Next) #0 {
entry:
%Val8 = getelementptr inbounds %struct.Node, %struct.Node* %Curr, i64 0, i32 0
%0 = load i32, i32* %Val8, align 8
%Val19 = getelementptr inbounds %struct.Node, %struct.Node* %Next, i64 0, i32 0
%1 = load i32, i32* %Val19, align 8
%cmp10 = icmp ugt i32 %0, %1
br i1 %cmp10, label %while.body, label %while.end
while.body: ; preds = %entry, %while.body
%2 = phi i32 [ %4, %while.body ], [ %1, %entry ]
%Next.addr.011 = phi %struct.Node* [ %3, %while.body ], [ %Next, %entry ]
%shl = shl i32 1, %2
%and = and i32 %shl, %Mask
%tobool = icmp eq i32 %and, 0
%Left = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 2
%Right = getelementptr inbounds %struct.Node, %struct.Node* %Next.addr.011, i64 0, i32 1
%Left.sink = select i1 %tobool, %struct.Node** %Left, %struct.Node** %Right
%3 = load %struct.Node*, %struct.Node** %Left.sink, align 8
%Val1 = getelementptr inbounds %struct.Node, %struct.Node* %3, i64 0, i32 0
%4 = load i32, i32* %Val1, align 8
%cmp = icmp ugt i32 %2, %4
br i1 %cmp, label %while.body, label %while.end
while.end: ; preds = %while.body, %entry
%.lcssa = phi i32 [ %0, %entry ], [ %2, %while.body ]
ret i32 %.lcssa
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; The following test checks that the x86-cmov-converter optimization
;; transforms CMOV instructions into branches correctly.
;;
;; MBB:
;; cond = cmp ...
;; v1 = CMOVgt t1, f1, cond
;; v2 = CMOVle s1, f2, cond
;;
;; Where: t1 = 11, f1 = 22, f2 = a
;;
;; After CMOV transformation
;; -------------------------
;; MBB:
;; cond = cmp ...
;; ja %SinkMBB
;;
;; FalseMBB:
;; jmp %SinkMBB
;;
;; SinkMBB:
;; %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
;; %v2 = phi[%f1, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC switch
;; ; true-value with false-value
;; ; Phi instruction cannot use
;; ; previous Phi instruction result
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; CHECK-LABEL: Transform
; CHECK-NOT: cmov
; CHECK: divl [[a:%[0-9a-z]*]]
; CHECK: cmpl [[a]], %eax
; CHECK: movl $11, [[s1:%[0-9a-z]*]]
; CHECK: movl [[a]], [[s2:%[0-9a-z]*]]
; CHECK: ja [[SinkBB:.*]]
; CHECK: [[FalseBB:.*]]:
; CHECK: movl $22, [[s1]]
; CHECK: movl $22, [[s2]]
; CHECK: [[SinkBB]]:
; CHECK: ja
define void @Transform(i32 *%arr, i32 *%arr2, i32 %a, i32 %b, i32 %c, i32 %n) #0 {
entry:
%cmp10 = icmp ugt i32 0, %n
br i1 %cmp10, label %while.body, label %while.end
while.body: ; preds = %entry, %while.body
%i = phi i32 [ %i_inc, %while.body ], [ 0, %entry ]
%arr_i = getelementptr inbounds i32, i32* %arr, i32 %i
%x = load i32, i32* %arr_i, align 4
%div = udiv i32 %x, %a
%cond = icmp ugt i32 %div, %a
%condOpp = icmp ule i32 %div, %a
%s1 = select i1 %cond, i32 11, i32 22
%s2 = select i1 %condOpp, i32 %s1, i32 %a
%sum = urem i32 %s1, %s2
store i32 %sum, i32* %arr_i, align 4
%i_inc = add i32 %i, 1
%cmp = icmp ugt i32 %i_inc, %n
br i1 %cmp, label %while.body, label %while.end
while.end: ; preds = %while.body, %entry
ret void
}
attributes #0 = {"target-cpu"="x86-64"}