From 30264d4391aaa0d45665e760e62e0fc9fc3ef01b Mon Sep 17 00:00:00 2001
From: Francis Visoiu Mistrih
Date: Mon, 4 Dec 2017 17:18:51 +0000
Subject: [PATCH] [CodeGen] Unify MBB reference format in both MIR and debug output

As part of the unification of the debug format and the MIR format, print
MBB references as '%bb.5'.

The MIR printer prints the IR name of an MBB only for block definitions.

* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E 's/BB#" << ([a-zA-Z0-9_]+)->getNumber\(\)/" << printMBBReference(*\1)/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E 's/BB#" << ([a-zA-Z0-9_]+)\.getNumber\(\)/" << printMBBReference(\1)/g'
* find . \( -name "*.txt" -o -name "*.s" -o -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E 's/BB#([0-9]+)/%bb.\1/g'
* grep -nr 'BB#' and fix

Differential Revision: https://reviews.llvm.org/D40422

llvm-svn: 319665
---
 docs/MIRLangRef.rst | 16 +-
 docs/NVPTXUsage.rst | 8 +-
 include/llvm/CodeGen/MachineBasicBlock.h | 9 +
 lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 3 +-
 lib/CodeGen/BranchFolding.cpp | 35 +-
 lib/CodeGen/BranchRelaxation.cpp | 19 +-
 lib/CodeGen/EarlyIfConversion.cpp | 24 +-
 lib/CodeGen/EdgeBundles.cpp | 12 +-
 lib/CodeGen/ExecutionDepsFix.cpp | 4 +-
 lib/CodeGen/IfConversion.cpp | 31 +-
 lib/CodeGen/LiveDebugVariables.cpp | 4 +-
 lib/CodeGen/LiveIntervalAnalysis.cpp | 2 +-
 lib/CodeGen/LiveRangeCalc.cpp | 2 +-
 lib/CodeGen/MIRParser/MILexer.cpp | 3 +
 lib/CodeGen/MIRParser/MIParser.cpp | 2 +
 lib/CodeGen/MIRPrinter.cpp | 22 +-
 lib/CodeGen/MachineBasicBlock.cpp | 23 +-
 lib/CodeGen/MachineBlockPlacement.cpp | 2 +-
 lib/CodeGen/MachineBranchProbabilityInfo.cpp | 2 +-
 lib/CodeGen/MachineFunction.cpp | 4 +-
 lib/CodeGen/MachineLICM.cpp | 12 +-
 lib/CodeGen/MachineOperand.cpp | 2 +-
 lib/CodeGen/MachineScheduler.cpp | 29 +-
 lib/CodeGen/MachineSink.cpp | 46 +-
 lib/CodeGen/MachineTraceMetrics.cpp | 49 +-
 lib/CodeGen/MachineVerifier.cpp | 47 +-
 lib/CodeGen/PHIElimination.cpp | 6 +-
 lib/CodeGen/PostRASchedulerList.cpp | 4 +-
 lib/CodeGen/ProcessImplicitDefs.cpp | 2 +-
 lib/CodeGen/RegAllocGreedy.cpp | 2 +-
 lib/CodeGen/RegisterCoalescer.cpp | 10 +-
 lib/CodeGen/ScheduleDAGInstrs.cpp | 2 +-
 .../SelectionDAG/ScheduleDAGRRList.cpp | 5 +-
 lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp | 5 +-
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 65 +-
 lib/CodeGen/SlotIndexes.cpp | 2 +-
 lib/CodeGen/SplitKit.cpp | 36 +-
 lib/CodeGen/StackColoring.cpp | 2 +-
 lib/CodeGen/TailDuplicator.cpp | 23 +-
 .../AArch64/AArch64ConditionOptimizer.cpp | 2 +-
 .../AArch64/AArch64ConditionalCompares.cpp | 16 +-
 .../AArch64RedundantCopyElimination.cpp | 10 +-
 .../AMDGPU/AMDGPUMachineCFGStructurizer.cpp | 85 +-
 lib/Target/AMDGPU/GCNIterativeScheduler.cpp | 4 +-
 lib/Target/AMDGPU/GCNSchedStrategy.cpp | 5 +-
 lib/Target/AMDGPU/SIFixSGPRCopies.cpp | 25 +-
 lib/Target/AMDGPU/SIMachineScheduler.cpp | 10 +-
 lib/Target/AMDGPU/SIWholeQuadMode.cpp | 5 +-
 lib/Target/ARM/ARMConstantIslandPass.cpp | 32 +-
 lib/Target/ARM/ARMConstantPoolValue.cpp | 2 +-
 lib/Target/BPF/BPFISelDAGToDAG.cpp | 8 +-
 lib/Target/Hexagon/BitTracker.cpp | 15 +-
 lib/Target/Hexagon/HexagonBitSimplify.cpp | 2 +-
 .../Hexagon/HexagonConstPropagation.cpp | 31 +-
 lib/Target/Hexagon/HexagonEarlyIfConv.cpp | 32 +-
 lib/Target/Hexagon/HexagonExpandCondsets.cpp | 2 +-
 lib/Target/Hexagon/HexagonFrameLowering.cpp | 27 +-
 lib/Target/Hexagon/HexagonGenInsert.cpp | 2 +-
lib/Target/Hexagon/HexagonHardwareLoops.cpp | 4 +- lib/Target/Hexagon/HexagonInstrInfo.cpp | 18 +- .../Hexagon/HexagonMachineScheduler.cpp | 14 +- lib/Target/Hexagon/HexagonOptAddrMode.cpp | 4 +- lib/Target/Hexagon/HexagonPeephole.cpp | 5 +- lib/Target/Hexagon/HexagonSplitDouble.cpp | 2 +- lib/Target/Hexagon/RDFGraph.cpp | 10 +- lib/Target/Hexagon/RDFGraph.h | 2 +- lib/Target/Hexagon/RDFLiveness.cpp | 4 +- lib/Target/MSP430/MSP430BranchSelector.cpp | 10 +- lib/Target/Mips/MipsConstantIslandPass.cpp | 30 +- lib/Target/PowerPC/PPCBranchCoalescing.cpp | 58 +- lib/Target/PowerPC/PPCCTRLoops.cpp | 19 +- lib/Target/PowerPC/PPCExpandISEL.cpp | 8 +- lib/Target/PowerPC/PPCMIPeephole.cpp | 7 +- lib/Target/PowerPC/PPCVSXSwapRemoval.cpp | 2 +- lib/Target/PowerPC/README.txt | 2 +- lib/Target/PowerPC/README_ALTIVEC.txt | 2 +- lib/Target/README.txt | 6 +- .../SystemZ/SystemZMachineScheduler.cpp | 8 +- .../WebAssemblyFixIrreducibleControlFlow.cpp | 3 +- lib/Target/X86/README.txt | 14 +- lib/Target/X86/X86FixupBWInsts.cpp | 13 +- lib/Target/X86/X86FloatingPoint.cpp | 4 +- .../AArch64/GlobalISel/arm64-irtranslator.ll | 94 +- .../GlobalISel/irtranslator-exceptions.ll | 6 +- .../AArch64/GlobalISel/legalize-exceptions.ll | 4 +- .../AArch64/GlobalISel/legalize-simple.mir | 10 +- .../GlobalISel/localizer-in-O0-pipeline.mir | 18 +- .../AArch64/aarch64-fix-cortex-a53-835769.ll | 4 +- test/CodeGen/AArch64/aarch64-stp-cluster.ll | 14 +- test/CodeGen/AArch64/analyze-branch.ll | 20 +- test/CodeGen/AArch64/arm64-ccmp.ll | 25 +- test/CodeGen/AArch64/arm64-fp128.ll | 4 +- test/CodeGen/AArch64/arm64-icmp-opt.ll | 2 +- .../AArch64/arm64-indexed-vector-ldst.ll | 2 +- test/CodeGen/AArch64/arm64-ldp-cluster.ll | 28 +- .../AArch64/arm64-misched-basic-A53.ll | 10 +- .../AArch64/arm64-misched-basic-A57.ll | 4 +- .../AArch64/arm64-misched-memdep-bug.ll | 2 +- test/CodeGen/AArch64/arm64-variadic-aapcs.ll | 2 +- test/CodeGen/AArch64/bics.ll | 6 +- test/CodeGen/AArch64/branch-relax-cbz.ll | 2 +- test/CodeGen/AArch64/fast-isel-assume.ll | 2 +- test/CodeGen/AArch64/fast-isel-atomic.ll | 48 +- test/CodeGen/AArch64/fast-isel-cmp-vec.ll | 24 +- test/CodeGen/AArch64/fast-isel-cmpxchg.ll | 8 +- test/CodeGen/AArch64/fcvt-int.ll | 4 +- test/CodeGen/AArch64/local_vars.ll | 2 +- test/CodeGen/AArch64/max-jump-table.ll | 8 +- test/CodeGen/AArch64/neon-bitcast.ll | 122 +- test/CodeGen/AArch64/nest-register.ll | 2 +- test/CodeGen/AArch64/recp-fastmath.ll | 28 +- test/CodeGen/AArch64/selectcc-to-shiftand.ll | 18 +- test/CodeGen/AArch64/sibling-call.ll | 4 +- test/CodeGen/AArch64/sqrt-fastmath.ll | 56 +- test/CodeGen/AArch64/tail-call.ll | 2 +- test/CodeGen/AMDGPU/branch-relaxation.ll | 8 +- test/CodeGen/AMDGPU/callee-frame-setup.ll | 8 +- test/CodeGen/AMDGPU/cf-loop-on-constant.ll | 2 +- .../AMDGPU/control-flow-fastregalloc.ll | 6 +- test/CodeGen/AMDGPU/convergent-inlineasm.ll | 2 +- test/CodeGen/AMDGPU/early-if-convert.ll | 2 +- test/CodeGen/AMDGPU/else.ll | 2 +- test/CodeGen/AMDGPU/fence-amdgiz.ll | 2 +- test/CodeGen/AMDGPU/i1-copy-implicit-def.ll | 2 +- test/CodeGen/AMDGPU/invert-br-undef-vcc.mir | 6 +- .../CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll | 10 +- .../AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll | 2 +- .../AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll | 2 +- .../AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll | 2 +- test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll | 2 +- test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll | 4 +- .../AMDGPU/llvm.amdgcn.s.dcache.inv.ll | 4 +- .../AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll | 4 +- .../CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll | 4 
+- .../AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll | 4 +- test/CodeGen/AMDGPU/loop_break.ll | 4 +- .../AMDGPU/memory-legalizer-atomic-fence.ll | 40 +- test/CodeGen/AMDGPU/multilevel-break.ll | 2 +- .../AMDGPU/optimize-if-exec-masking.mir | 48 +- test/CodeGen/AMDGPU/ret_jump.ll | 2 +- test/CodeGen/AMDGPU/sgpr-control-flow.ll | 2 +- ...si-lower-control-flow-unreachable-block.ll | 2 +- test/CodeGen/AMDGPU/skip-if-dead.ll | 48 +- test/CodeGen/AMDGPU/smrd.ll | 8 +- test/CodeGen/AMDGPU/uniform-cfg.ll | 2 +- test/CodeGen/AMDGPU/valu-i1.ll | 2 +- .../AMDGPU/vccz-corrupt-bug-workaround.mir | 12 +- test/CodeGen/ARM/Windows/dbzchk.ll | 48 +- test/CodeGen/ARM/and-load-combine.ll | 168 +- test/CodeGen/ARM/arm-and-tst-peephole.ll | 10 +- test/CodeGen/ARM/atomic-ops-v8.ll | 8 +- test/CodeGen/ARM/bool-ext-inc.ll | 8 +- test/CodeGen/ARM/cmpxchg-weak.ll | 10 +- test/CodeGen/ARM/cortex-a57-misched-alu.ll | 2 +- test/CodeGen/ARM/cortex-a57-misched-basic.ll | 4 +- test/CodeGen/ARM/cortex-a57-misched-vadd.ll | 4 +- test/CodeGen/ARM/cortex-a57-misched-vfma.ll | 12 +- test/CodeGen/ARM/cortex-a57-misched-vsub.ll | 4 +- test/CodeGen/ARM/cortexr52-misched-basic.ll | 4 +- .../ARM/crash-on-pow2-shufflevector.ll | 2 +- test/CodeGen/ARM/deprecated-asm.s | 2 +- test/CodeGen/ARM/ifcvt-branch-weight-bug.ll | 4 +- test/CodeGen/ARM/ifcvt-branch-weight.ll | 4 +- test/CodeGen/ARM/ifcvt-iter-indbr.ll | 8 +- .../CodeGen/ARM/illegal-bitfield-loadstore.ll | 24 +- test/CodeGen/ARM/jump-table-tbh.ll | 4 +- test/CodeGen/ARM/machine-licm.ll | 2 +- test/CodeGen/ARM/misched-copy-arm.ll | 4 +- test/CodeGen/ARM/negate-i1.ll | 4 +- test/CodeGen/ARM/neon_vabs.ll | 26 +- test/CodeGen/ARM/nest-register.ll | 2 +- test/CodeGen/ARM/noopt-dmb-v7.ll | 2 +- test/CodeGen/ARM/select_const.ll | 52 +- test/CodeGen/ARM/setcc-logic.ll | 8 +- test/CodeGen/ARM/tail-merge-branch-weight.ll | 6 +- test/CodeGen/ARM/taildup-branch-weight.ll | 4 +- .../ARM/v8m.base-jumptable_alignment.ll | 2 +- test/CodeGen/ARM/vbits.ll | 118 +- test/CodeGen/ARM/vcvt.ll | 56 +- test/CodeGen/ARM/vext.ll | 42 +- test/CodeGen/ARM/vpadd.ll | 62 +- test/CodeGen/ARM/vtrn.ll | 44 +- test/CodeGen/ARM/vuzp.ll | 54 +- test/CodeGen/ARM/vzip.ll | 46 +- test/CodeGen/AVR/atomics/fence.ll | 2 +- .../AVR/select-must-add-unconditional-jump.ll | 24 +- test/CodeGen/Generic/MachineBranchProb.ll | 20 +- .../Hexagon/branch-folder-hoist-kills.mir | 2 +- test/CodeGen/Hexagon/hwloop-redef-imm.mir | 4 +- test/CodeGen/Hexagon/ifcvt-edge-weight.ll | 4 +- .../X86/frame-info-save-restore-points.mir | 18 +- .../MIR/X86/implicit-register-flag.mir | 6 +- test/CodeGen/MIR/X86/jump-table-info.mir | 18 +- .../MIR/X86/machine-basic-block-operands.mir | 6 +- test/CodeGen/MIR/X86/newline-handling.mir | 16 +- .../X86/successor-basic-blocks-weights.mir | 6 +- test/CodeGen/MSP430/BranchSelector.ll | 2 +- .../Mips/compactbranches/empty-block.mir | 6 +- test/CodeGen/Mips/lcb4a.ll | 2 +- test/CodeGen/Mips/prevent-hoisting.ll | 2 +- .../PowerPC/2006-07-07-ComputeMaskedBits.ll | 2 +- test/CodeGen/PowerPC/addegluecrash.ll | 2 +- test/CodeGen/PowerPC/andc.ll | 8 +- test/CodeGen/PowerPC/atomics-constant.ll | 2 +- test/CodeGen/PowerPC/atomics-regression.ll | 2276 +++++------ test/CodeGen/PowerPC/branch_coalesce.ll | 4 +- test/CodeGen/PowerPC/fabs.ll | 4 +- test/CodeGen/PowerPC/fma-aggr-FMF.ll | 4 +- test/CodeGen/PowerPC/fp64-to-int16.ll | 2 +- test/CodeGen/PowerPC/hello-reloc.s | 2 +- test/CodeGen/PowerPC/licm-remat.ll | 2 +- test/CodeGen/PowerPC/licm-tocReg.ll | 2 +- test/CodeGen/PowerPC/logic-ops-on-compares.ll | 12 +- 
test/CodeGen/PowerPC/machine-combiner.ll | 18 +- .../memCmpUsedInZeroEqualityComparison.ll | 28 +- test/CodeGen/PowerPC/memcmp.ll | 8 +- test/CodeGen/PowerPC/negate-i1.ll | 4 +- test/CodeGen/PowerPC/ppc32-nest.ll | 2 +- test/CodeGen/PowerPC/ppc64-nest.ll | 2 +- test/CodeGen/PowerPC/pr32140.ll | 6 +- test/CodeGen/PowerPC/pr33093.ll | 4 +- test/CodeGen/PowerPC/select-addrRegRegOnly.ll | 4 +- test/CodeGen/PowerPC/select_const.ll | 150 +- test/CodeGen/PowerPC/setcc-logic.ll | 72 +- test/CodeGen/PowerPC/setcc-to-sub.ll | 8 +- test/CodeGen/PowerPC/shift_mask.ll | 48 +- test/CodeGen/PowerPC/sjlj.ll | 2 +- .../PowerPC/tail-dup-branch-to-fallthrough.ll | 8 +- test/CodeGen/PowerPC/tail-dup-layout.ll | 20 +- test/CodeGen/PowerPC/testBitReverse.ll | 4 +- test/CodeGen/PowerPC/testComparesi32gtu.ll | 4 +- test/CodeGen/PowerPC/testComparesi32leu.ll | 2 +- test/CodeGen/PowerPC/testComparesi32ltu.ll | 4 +- test/CodeGen/PowerPC/testComparesieqsc.ll | 16 +- test/CodeGen/PowerPC/testComparesieqsi.ll | 16 +- test/CodeGen/PowerPC/testComparesieqsll.ll | 16 +- test/CodeGen/PowerPC/testComparesieqss.ll | 16 +- test/CodeGen/PowerPC/testComparesiequc.ll | 16 +- test/CodeGen/PowerPC/testComparesiequi.ll | 16 +- test/CodeGen/PowerPC/testComparesiequll.ll | 16 +- test/CodeGen/PowerPC/testComparesiequs.ll | 16 +- test/CodeGen/PowerPC/testComparesigesc.ll | 8 +- test/CodeGen/PowerPC/testComparesigesi.ll | 8 +- test/CodeGen/PowerPC/testComparesigesll.ll | 16 +- test/CodeGen/PowerPC/testComparesigess.ll | 8 +- test/CodeGen/PowerPC/testComparesigtsc.ll | 12 +- test/CodeGen/PowerPC/testComparesigtsi.ll | 12 +- test/CodeGen/PowerPC/testComparesigtsll.ll | 12 +- test/CodeGen/PowerPC/testComparesigtss.ll | 14 +- test/CodeGen/PowerPC/testComparesilesc.ll | 8 +- test/CodeGen/PowerPC/testComparesilesi.ll | 8 +- test/CodeGen/PowerPC/testComparesilesll.ll | 16 +- test/CodeGen/PowerPC/testComparesiless.ll | 8 +- test/CodeGen/PowerPC/testComparesiltsc.ll | 8 +- test/CodeGen/PowerPC/testComparesiltsi.ll | 10 +- test/CodeGen/PowerPC/testComparesiltsll.ll | 10 +- test/CodeGen/PowerPC/testComparesiltss.ll | 8 +- test/CodeGen/PowerPC/testComparesinesll.ll | 16 +- test/CodeGen/PowerPC/testComparesineuc.ll | 16 +- test/CodeGen/PowerPC/testComparesineull.ll | 16 +- test/CodeGen/PowerPC/testComparesineus.ll | 16 +- test/CodeGen/PowerPC/testCompareslleqsc.ll | 16 +- test/CodeGen/PowerPC/testCompareslleqsi.ll | 16 +- test/CodeGen/PowerPC/testCompareslleqsll.ll | 16 +- test/CodeGen/PowerPC/testCompareslleqss.ll | 16 +- test/CodeGen/PowerPC/testComparesllequc.ll | 16 +- test/CodeGen/PowerPC/testComparesllequi.ll | 16 +- test/CodeGen/PowerPC/testComparesllequll.ll | 16 +- test/CodeGen/PowerPC/testComparesllequs.ll | 16 +- test/CodeGen/PowerPC/testComparesllgesc.ll | 8 +- test/CodeGen/PowerPC/testComparesllgesi.ll | 8 +- test/CodeGen/PowerPC/testComparesllgesll.ll | 16 +- test/CodeGen/PowerPC/testComparesllgess.ll | 8 +- test/CodeGen/PowerPC/testComparesllgtsll.ll | 12 +- test/CodeGen/PowerPC/testComparesllgtus.ll | 16 +- test/CodeGen/PowerPC/testCompareslllesc.ll | 8 +- test/CodeGen/PowerPC/testCompareslllesi.ll | 8 +- test/CodeGen/PowerPC/testCompareslllesll.ll | 16 +- test/CodeGen/PowerPC/testComparesllless.ll | 8 +- test/CodeGen/PowerPC/testComparesllltsll.ll | 10 +- test/CodeGen/PowerPC/testComparesllltuc.ll | 8 +- test/CodeGen/PowerPC/testComparesllltui.ll | 16 +- test/CodeGen/PowerPC/testComparesllltus.ll | 6 +- test/CodeGen/PowerPC/testComparesllnesll.ll | 16 +- test/CodeGen/PowerPC/testComparesllneull.ll | 16 +- 
test/CodeGen/PowerPC/vec_add_sub_quadword.ll | 2 +- test/CodeGen/PowerPC/vec_extract_p9.ll | 36 +- test/CodeGen/PowerPC/vec_extract_p9_2.ll | 48 +- test/CodeGen/PowerPC/vec_int_ext.ll | 48 +- test/CodeGen/PowerPC/vec_revb.ll | 8 +- test/CodeGen/PowerPC/vselect-constants.ll | 28 +- test/CodeGen/RISCV/addc-adde-sube-subc.ll | 4 +- test/CodeGen/RISCV/alu32.ll | 38 +- test/CodeGen/RISCV/bare-select.ll | 4 +- test/CodeGen/RISCV/blockaddress.ll | 2 +- test/CodeGen/RISCV/branch.ll | 2 +- test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll | 30 +- test/CodeGen/RISCV/calls.ll | 12 +- test/CodeGen/RISCV/div.ll | 20 +- test/CodeGen/RISCV/i32-icmp.ll | 20 +- test/CodeGen/RISCV/imm.ll | 10 +- test/CodeGen/RISCV/indirectbr.ll | 4 +- test/CodeGen/RISCV/jumptable.ll | 2 +- test/CodeGen/RISCV/mem.ll | 24 +- test/CodeGen/RISCV/mul.ll | 12 +- test/CodeGen/RISCV/rem.ll | 4 +- test/CodeGen/RISCV/rotl-rotr.ll | 4 +- test/CodeGen/RISCV/select-cc.ll | 22 +- test/CodeGen/RISCV/sext-zext-trunc.ll | 60 +- test/CodeGen/RISCV/shifts.ll | 6 +- test/CodeGen/RISCV/wide-mem.ll | 4 +- test/CodeGen/SPARC/analyze-branch.ll | 4 +- test/CodeGen/SPARC/vector-extract-elt.ll | 2 +- test/CodeGen/SystemZ/DAGCombiner_isAlias.ll | 2 +- test/CodeGen/SystemZ/dag-combine-02.ll | 2 +- test/CodeGen/SystemZ/int-cmp-51.ll | 2 +- test/CodeGen/SystemZ/pr32372.ll | 2 +- test/CodeGen/SystemZ/pr32505.ll | 2 +- test/CodeGen/SystemZ/strcmp-01.ll | 6 +- test/CodeGen/SystemZ/strlen-01.ll | 4 +- .../SystemZ/vec-cmp-cmp-logic-select.ll | 84 +- test/CodeGen/SystemZ/vec-cmpsel.ll | 78 +- test/CodeGen/SystemZ/vec-trunc-to-i1.ll | 4 +- .../Thumb2/ifcvt-rescan-bug-2016-08-22.ll | 2 +- test/CodeGen/WebAssembly/dbgvalue.ll | 4 +- test/CodeGen/WebAssembly/signext-arg.ll | 2 +- test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll | 2 +- test/CodeGen/X86/2006-03-01-InstrSchedBug.ll | 2 +- test/CodeGen/X86/2008-02-14-BitMiscompile.ll | 2 +- .../X86/2009-04-12-FastIselOverflowCrash.ll | 2 +- test/CodeGen/X86/2010-05-12-FastAllocKills.ll | 16 +- .../X86/2010-08-04-MaskedSignedCompare.ll | 4 +- test/CodeGen/X86/2011-10-19-widen_vselect.ll | 16 +- test/CodeGen/X86/2011-10-21-widen-cmp.ll | 6 +- ...011-12-26-extractelement-duplicate-load.ll | 4 +- test/CodeGen/X86/2011-12-8-bitcastintprom.ll | 4 +- test/CodeGen/X86/2011-20-21-zext-ui2fp.ll | 2 +- test/CodeGen/X86/2012-01-11-split-cv.ll | 2 +- test/CodeGen/X86/2012-01-12-extract-sv.ll | 2 +- test/CodeGen/X86/2012-04-26-sdglue.ll | 2 +- test/CodeGen/X86/2012-07-10-extload64.ll | 6 +- test/CodeGen/X86/2012-08-16-setcc.ll | 8 +- test/CodeGen/X86/2012-1-10-buildvector.ll | 4 +- test/CodeGen/X86/2012-12-1-merge-multiple.ll | 2 +- test/CodeGen/X86/3dnow-schedule.ll | 54 +- test/CodeGen/X86/GlobalISel/GV.ll | 16 +- test/CodeGen/X86/GlobalISel/add-scalar.ll | 20 +- test/CodeGen/X86/GlobalISel/add-vec.ll | 56 +- test/CodeGen/X86/GlobalISel/and-scalar.ll | 10 +- test/CodeGen/X86/GlobalISel/binop.ll | 44 +- test/CodeGen/X86/GlobalISel/br.ll | 2 +- test/CodeGen/X86/GlobalISel/brcond.ll | 16 +- test/CodeGen/X86/GlobalISel/callingconv.ll | 72 +- test/CodeGen/X86/GlobalISel/cmp.ll | 26 +- test/CodeGen/X86/GlobalISel/constant.ll | 14 +- test/CodeGen/X86/GlobalISel/ext-x86-64.ll | 6 +- test/CodeGen/X86/GlobalISel/ext.ll | 28 +- test/CodeGen/X86/GlobalISel/fadd-scalar.ll | 4 +- test/CodeGen/X86/GlobalISel/fconstant.ll | 6 +- test/CodeGen/X86/GlobalISel/fdiv-scalar.ll | 4 +- test/CodeGen/X86/GlobalISel/fmul-scalar.ll | 4 +- test/CodeGen/X86/GlobalISel/fpext-scalar.ll | 2 +- test/CodeGen/X86/GlobalISel/frameIndex.ll | 6 +- 
test/CodeGen/X86/GlobalISel/fsub-scalar.ll | 4 +- test/CodeGen/X86/GlobalISel/gep.ll | 32 +- .../X86/GlobalISel/legalize-brcond.mir | 14 +- test/CodeGen/X86/GlobalISel/legalize-phi.mir | 210 +- .../X86/GlobalISel/memop-scalar-x32.ll | 20 +- test/CodeGen/X86/GlobalISel/memop-scalar.ll | 40 +- test/CodeGen/X86/GlobalISel/memop-vec.ll | 24 +- test/CodeGen/X86/GlobalISel/mul-scalar.ll | 6 +- test/CodeGen/X86/GlobalISel/mul-vec.ll | 18 +- test/CodeGen/X86/GlobalISel/or-scalar.ll | 10 +- test/CodeGen/X86/GlobalISel/phi.ll | 24 +- .../X86/GlobalISel/regbankselect-X86_64.mir | 32 +- test/CodeGen/X86/GlobalISel/select-br.mir | 12 +- test/CodeGen/X86/GlobalISel/select-brcond.mir | 14 +- test/CodeGen/X86/GlobalISel/select-phi.mir | 96 +- test/CodeGen/X86/GlobalISel/sub-scalar.ll | 10 +- test/CodeGen/X86/GlobalISel/sub-vec.ll | 24 +- test/CodeGen/X86/GlobalISel/trunc.ll | 12 +- test/CodeGen/X86/GlobalISel/undef.ll | 8 +- test/CodeGen/X86/GlobalISel/xor-scalar.ll | 10 +- test/CodeGen/X86/MachineBranchProb.ll | 8 +- test/CodeGen/X86/MergeConsecutiveStores.ll | 72 +- test/CodeGen/X86/SwizzleShuff.ll | 10 +- test/CodeGen/X86/TruncAssertSext.ll | 2 +- test/CodeGen/X86/TruncAssertZext.ll | 4 +- test/CodeGen/X86/WidenArith.ll | 4 +- test/CodeGen/X86/add-ext.ll | 22 +- test/CodeGen/X86/add-of-carry.ll | 4 +- test/CodeGen/X86/add-sub-nsw-nuw.ll | 2 +- test/CodeGen/X86/add.ll | 90 +- test/CodeGen/X86/addcarry.ll | 18 +- test/CodeGen/X86/adx-intrinsics.ll | 26 +- test/CodeGen/X86/aes-schedule.ll | 96 +- test/CodeGen/X86/aes_intrinsics.ll | 24 +- test/CodeGen/X86/all-ones-vector.ll | 248 +- test/CodeGen/X86/and-sink.ll | 30 +- test/CodeGen/X86/anyext.ll | 8 +- test/CodeGen/X86/atom-fixup-lea2.ll | 2 +- test/CodeGen/X86/atomic-eflags-reuse.ll | 56 +- test/CodeGen/X86/atomic-minmax-i6432.ll | 40 +- test/CodeGen/X86/atomic128.ll | 50 +- test/CodeGen/X86/avg-mask.ll | 48 +- test/CodeGen/X86/avg.ll | 190 +- test/CodeGen/X86/avx-arith.ll | 62 +- test/CodeGen/X86/avx-basic.ll | 20 +- test/CodeGen/X86/avx-bitcast.ll | 2 +- test/CodeGen/X86/avx-cast.ll | 14 +- test/CodeGen/X86/avx-cmp.ll | 32 +- test/CodeGen/X86/avx-cvt-2.ll | 8 +- test/CodeGen/X86/avx-cvt-3.ll | 24 +- test/CodeGen/X86/avx-cvt.ll | 32 +- test/CodeGen/X86/avx-gfni-intrinsics.ll | 12 +- test/CodeGen/X86/avx-insertelt.ll | 16 +- test/CodeGen/X86/avx-intrinsics-fast-isel.ll | 760 ++-- .../CodeGen/X86/avx-intrinsics-x86-upgrade.ll | 110 +- test/CodeGen/X86/avx-intrinsics-x86.ll | 210 +- test/CodeGen/X86/avx-intrinsics-x86_64.ll | 8 +- test/CodeGen/X86/avx-load-store.ll | 52 +- test/CodeGen/X86/avx-logic.ll | 44 +- test/CodeGen/X86/avx-schedule.ll | 1440 +++---- test/CodeGen/X86/avx-select.ll | 16 +- test/CodeGen/X86/avx-shift.ll | 32 +- test/CodeGen/X86/avx-shuffle-x86_32.ll | 4 +- test/CodeGen/X86/avx-splat.ll | 26 +- test/CodeGen/X86/avx-trunc.ll | 6 +- test/CodeGen/X86/avx-unpack.ll | 40 +- test/CodeGen/X86/avx-vbroadcast.ll | 164 +- test/CodeGen/X86/avx-vbroadcastf128.ll | 52 +- test/CodeGen/X86/avx-vextractf128.ll | 20 +- test/CodeGen/X86/avx-vinsertf128.ll | 20 +- test/CodeGen/X86/avx-vpclmulqdq.ll | 2 +- test/CodeGen/X86/avx-vperm2x128.ll | 144 +- test/CodeGen/X86/avx-vzeroupper.ll | 34 +- test/CodeGen/X86/avx1-logical-load-folding.ll | 16 +- test/CodeGen/X86/avx2-arith.ll | 96 +- test/CodeGen/X86/avx2-cmp.ll | 32 +- test/CodeGen/X86/avx2-conversions.ll | 60 +- test/CodeGen/X86/avx2-fma-fneg-combine.ll | 24 +- test/CodeGen/X86/avx2-gather.ll | 24 +- test/CodeGen/X86/avx2-intrinsics-fast-isel.ll | 470 +-- .../X86/avx2-intrinsics-x86-upgrade.ll | 110 +- 
test/CodeGen/X86/avx2-intrinsics-x86.ll | 430 +-- test/CodeGen/X86/avx2-logic.ll | 28 +- test/CodeGen/X86/avx2-masked-gather.ll | 160 +- test/CodeGen/X86/avx2-nontemporal.ll | 4 +- test/CodeGen/X86/avx2-phaddsub.ll | 32 +- test/CodeGen/X86/avx2-pmovxrm.ll | 48 +- test/CodeGen/X86/avx2-schedule.ll | 1908 ++++----- test/CodeGen/X86/avx2-shift.ll | 148 +- test/CodeGen/X86/avx2-vbroadcast.ll | 270 +- test/CodeGen/X86/avx2-vbroadcasti128.ll | 52 +- test/CodeGen/X86/avx2-vector-shifts.ll | 140 +- test/CodeGen/X86/avx2-vperm.ll | 16 +- test/CodeGen/X86/avx512-adc-sbb.ll | 2 +- test/CodeGen/X86/avx512-any_extend_load.ll | 12 +- test/CodeGen/X86/avx512-arith.ll | 224 +- test/CodeGen/X86/avx512-bugfix-23634.ll | 2 +- test/CodeGen/X86/avx512-bugfix-25270.ll | 2 +- test/CodeGen/X86/avx512-bugfix-26264.ll | 4 +- test/CodeGen/X86/avx512-build-vector.ll | 4 +- test/CodeGen/X86/avx512-calling-conv.ll | 70 +- test/CodeGen/X86/avx512-cmp-kor-sequence.ll | 2 +- test/CodeGen/X86/avx512-cmp.ll | 28 +- test/CodeGen/X86/avx512-cvt.ll | 348 +- test/CodeGen/X86/avx512-ext.ll | 336 +- .../avx512-extract-subvector-load-store.ll | 212 +- test/CodeGen/X86/avx512-extract-subvector.ll | 138 +- test/CodeGen/X86/avx512-fma-commute.ll | 16 +- test/CodeGen/X86/avx512-fma-intrinsics.ll | 128 +- test/CodeGen/X86/avx512-fma.ll | 34 +- test/CodeGen/X86/avx512-fsel.ll | 2 +- .../X86/avx512-gather-scatter-intrin.ll | 104 +- test/CodeGen/X86/avx512-gfni-intrinsics.ll | 18 +- test/CodeGen/X86/avx512-hadd-hsub.ll | 48 +- test/CodeGen/X86/avx512-i1test.ll | 8 +- test/CodeGen/X86/avx512-insert-extract.ll | 216 +- test/CodeGen/X86/avx512-insert-extract_i1.ll | 2 +- .../X86/avx512-intrinsics-fast-isel.ll | 364 +- test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 504 +-- test/CodeGen/X86/avx512-intrinsics.ll | 748 ++-- test/CodeGen/X86/avx512-load-store.ll | 40 +- test/CodeGen/X86/avx512-logic.ll | 160 +- test/CodeGen/X86/avx512-mask-op.ll | 472 +-- test/CodeGen/X86/avx512-mask-spills.ll | 10 +- test/CodeGen/X86/avx512-mask-zext-bugfix.ll | 2 +- test/CodeGen/X86/avx512-masked-memop-64-32.ll | 40 +- test/CodeGen/X86/avx512-masked_memop-16-8.ll | 24 +- test/CodeGen/X86/avx512-memfold.ll | 10 +- test/CodeGen/X86/avx512-mov.ll | 94 +- test/CodeGen/X86/avx512-pmovxrm.ll | 48 +- test/CodeGen/X86/avx512-regcall-Mask.ll | 88 +- test/CodeGen/X86/avx512-regcall-NoMask.ll | 174 +- test/CodeGen/X86/avx512-rotate.ll | 40 +- test/CodeGen/X86/avx512-scalarIntrinsics.ll | 28 +- test/CodeGen/X86/avx512-scalar_mask.ll | 20 +- test/CodeGen/X86/avx512-schedule.ll | 1882 ++++----- test/CodeGen/X86/avx512-select.ll | 64 +- test/CodeGen/X86/avx512-shift.ll | 36 +- test/CodeGen/X86/avx512-shuffle-schedule.ll | 3400 ++++++++--------- .../avx512-shuffles/broadcast-scalar-fp.ll | 180 +- .../avx512-shuffles/broadcast-scalar-int.ll | 416 +- .../avx512-shuffles/broadcast-vector-fp.ll | 180 +- .../avx512-shuffles/broadcast-vector-int.ll | 216 +- .../X86/avx512-shuffles/duplicate-high.ll | 132 +- .../X86/avx512-shuffles/duplicate-low.ll | 240 +- .../X86/avx512-shuffles/in_lane_permute.ll | 292 +- .../X86/avx512-shuffles/partial_permute.ll | 592 +-- test/CodeGen/X86/avx512-shuffles/permute.ll | 472 +-- .../X86/avx512-shuffles/shuffle-interleave.ll | 220 +- .../X86/avx512-shuffles/shuffle-vec.ll | 320 +- test/CodeGen/X86/avx512-shuffles/shuffle.ll | 468 +-- test/CodeGen/X86/avx512-shuffles/unpack.ll | 440 +-- test/CodeGen/X86/avx512-skx-insert-subvec.ll | 24 +- test/CodeGen/X86/avx512-trunc.ll | 154 +- test/CodeGen/X86/avx512-unsafe-fp-math.ll | 24 +- 
test/CodeGen/X86/avx512-vbroadcast.ll | 66 +- test/CodeGen/X86/avx512-vbroadcasti128.ll | 38 +- test/CodeGen/X86/avx512-vbroadcasti256.ll | 20 +- test/CodeGen/X86/avx512-vec-cmp.ll | 140 +- test/CodeGen/X86/avx512-vec3-crash.ll | 2 +- test/CodeGen/X86/avx512-vpclmulqdq.ll | 2 +- test/CodeGen/X86/avx512-vpermv3-commute.ll | 58 +- test/CodeGen/X86/avx512-vpternlog-commute.ll | 176 +- test/CodeGen/X86/avx512-vselect-crash.ll | 2 +- test/CodeGen/X86/avx512-vselect.ll | 4 +- .../X86/avx512bw-intrinsics-fast-isel.ll | 128 +- .../X86/avx512bw-intrinsics-upgrade.ll | 360 +- test/CodeGen/X86/avx512bw-intrinsics.ll | 400 +- test/CodeGen/X86/avx512bw-mask-op.ll | 28 +- test/CodeGen/X86/avx512bw-mov.ll | 32 +- test/CodeGen/X86/avx512bw-vec-cmp.ll | 24 +- test/CodeGen/X86/avx512bw-vec-test-testn.ll | 16 +- .../X86/avx512bwvl-intrinsics-fast-isel.ll | 144 +- .../X86/avx512bwvl-intrinsics-upgrade.ll | 468 +-- test/CodeGen/X86/avx512bwvl-intrinsics.ll | 396 +- test/CodeGen/X86/avx512bwvl-mov.ll | 32 +- test/CodeGen/X86/avx512bwvl-vec-cmp.ll | 48 +- test/CodeGen/X86/avx512bwvl-vec-test-testn.ll | 32 +- .../X86/avx512cd-intrinsics-fast-isel.ll | 4 +- .../X86/avx512cd-intrinsics-upgrade.ll | 12 +- test/CodeGen/X86/avx512cd-intrinsics.ll | 14 +- .../X86/avx512cdvl-intrinsics-upgrade.ll | 16 +- test/CodeGen/X86/avx512cdvl-intrinsics.ll | 16 +- .../X86/avx512dq-intrinsics-upgrade.ll | 36 +- test/CodeGen/X86/avx512dq-intrinsics.ll | 58 +- test/CodeGen/X86/avx512dq-mask-op.ll | 8 +- .../X86/avx512dqvl-intrinsics-upgrade.ll | 298 +- test/CodeGen/X86/avx512dqvl-intrinsics.ll | 84 +- test/CodeGen/X86/avx512er-intrinsics.ll | 42 +- test/CodeGen/X86/avx512f-vec-test-testn.ll | 16 +- test/CodeGen/X86/avx512ifma-intrinsics.ll | 32 +- test/CodeGen/X86/avx512ifmavl-intrinsics.ll | 16 +- test/CodeGen/X86/avx512vbmi-intrinsics.ll | 10 +- test/CodeGen/X86/avx512vbmi2-intrinsics.ll | 40 +- test/CodeGen/X86/avx512vbmi2vl-intrinsics.ll | 80 +- test/CodeGen/X86/avx512vbmivl-intrinsics.ll | 20 +- test/CodeGen/X86/avx512vl-arith.ll | 146 +- .../X86/avx512vl-intrinsics-fast-isel.ll | 384 +- .../X86/avx512vl-intrinsics-upgrade.ll | 746 ++-- test/CodeGen/X86/avx512vl-intrinsics.ll | 706 ++-- test/CodeGen/X86/avx512vl-logic.ll | 160 +- test/CodeGen/X86/avx512vl-mov.ll | 128 +- test/CodeGen/X86/avx512vl-vbroadcast.ll | 28 +- test/CodeGen/X86/avx512vl-vec-cmp.ll | 192 +- test/CodeGen/X86/avx512vl-vec-masked-cmp.ll | 3138 +++++++-------- test/CodeGen/X86/avx512vl-vec-test-testn.ll | 64 +- test/CodeGen/X86/avx512vl-vpclmulqdq.ll | 4 +- test/CodeGen/X86/avx512vl_vnni-intrinsics.ll | 16 +- .../X86/avx512vlcd-intrinsics-fast-isel.ll | 8 +- test/CodeGen/X86/avx512vnni-intrinsics.ll | 8 +- .../CodeGen/X86/avx512vpopcntdq-intrinsics.ll | 16 +- test/CodeGen/X86/bc-extract.ll | 12 +- test/CodeGen/X86/bigstructret.ll | 8 +- test/CodeGen/X86/bitcast-and-setcc-128.ll | 102 +- test/CodeGen/X86/bitcast-and-setcc-256.ll | 56 +- test/CodeGen/X86/bitcast-and-setcc-512.ll | 56 +- test/CodeGen/X86/bitcast-i256.ll | 4 +- .../X86/bitcast-int-to-vector-bool-sext.ll | 98 +- .../X86/bitcast-int-to-vector-bool-zext.ll | 122 +- .../CodeGen/X86/bitcast-int-to-vector-bool.ll | 48 +- test/CodeGen/X86/bitcast-int-to-vector.ll | 6 +- test/CodeGen/X86/bitcast-mmx.ll | 16 +- test/CodeGen/X86/bitcast-setcc-128.ll | 102 +- test/CodeGen/X86/bitcast-setcc-256.ll | 56 +- test/CodeGen/X86/bitcast-setcc-512.ll | 60 +- test/CodeGen/X86/bitreverse.ll | 60 +- test/CodeGen/X86/block-placement.ll | 6 +- test/CodeGen/X86/block-placement.mir | 14 +- 
.../X86/bmi-intrinsics-fast-isel-x86_64.ll | 24 +- test/CodeGen/X86/bmi-intrinsics-fast-isel.ll | 64 +- test/CodeGen/X86/bmi-schedule.ll | 168 +- test/CodeGen/X86/bmi.ll | 144 +- test/CodeGen/X86/bmi2-schedule.ll | 180 +- test/CodeGen/X86/bmi2.ll | 18 +- test/CodeGen/X86/bool-ext-inc.ll | 14 +- test/CodeGen/X86/bool-simplify.ll | 20 +- test/CodeGen/X86/bool-vector.ll | 24 +- test/CodeGen/X86/bool-zext.ll | 12 +- .../X86/broadcast-elm-cross-splat-vec.ll | 384 +- test/CodeGen/X86/broadcastm-lowering.ll | 36 +- test/CodeGen/X86/bswap-rotate.ll | 4 +- test/CodeGen/X86/bswap-vector.ll | 106 +- test/CodeGen/X86/bswap-wide-int.ll | 24 +- test/CodeGen/X86/bswap_tree.ll | 8 +- test/CodeGen/X86/bswap_tree2.ll | 12 +- test/CodeGen/X86/bt.ll | 240 +- test/CodeGen/X86/btq.ll | 8 +- test/CodeGen/X86/build-vector-128.ll | 60 +- test/CodeGen/X86/build-vector-256.ll | 36 +- test/CodeGen/X86/build-vector-512.ll | 32 +- test/CodeGen/X86/buildvec-insertvec.ll | 62 +- test/CodeGen/X86/bypass-slow-division-32.ll | 32 +- test/CodeGen/X86/bypass-slow-division-64.ll | 12 +- test/CodeGen/X86/cast-vsel.ll | 68 +- test/CodeGen/X86/catchpad-weight.ll | 2 +- test/CodeGen/X86/chain_order.ll | 2 +- .../X86/clear_upper_vector_element_bits.ll | 142 +- test/CodeGen/X86/clflushopt-schedule.ll | 10 +- test/CodeGen/X86/clflushopt.ll | 4 +- test/CodeGen/X86/clwb.ll | 2 +- test/CodeGen/X86/clz.ll | 224 +- test/CodeGen/X86/clzero.ll | 4 +- test/CodeGen/X86/cmov-into-branch.ll | 22 +- test/CodeGen/X86/cmov-promotion.ll | 84 +- test/CodeGen/X86/cmov.ll | 26 +- test/CodeGen/X86/cmovcmov.ll | 14 +- test/CodeGen/X86/cmp.ll | 64 +- test/CodeGen/X86/coalesce_commute_movsd.ll | 16 +- test/CodeGen/X86/combine-64bit-vec-binop.ll | 42 +- test/CodeGen/X86/combine-abs.ll | 24 +- test/CodeGen/X86/combine-add.ll | 68 +- test/CodeGen/X86/combine-and.ll | 52 +- test/CodeGen/X86/combine-avx-intrinsics.ll | 12 +- test/CodeGen/X86/combine-avx2-intrinsics.ll | 18 +- test/CodeGen/X86/combine-fcopysign.ll | 48 +- test/CodeGen/X86/combine-mul.ll | 68 +- test/CodeGen/X86/combine-multiplies.ll | 8 +- test/CodeGen/X86/combine-or.ll | 74 +- test/CodeGen/X86/combine-pmuldq.ll | 16 +- test/CodeGen/X86/combine-rotates.ll | 12 +- test/CodeGen/X86/combine-sdiv.ll | 34 +- test/CodeGen/X86/combine-sext-in-reg.ll | 8 +- test/CodeGen/X86/combine-shl.ll | 128 +- test/CodeGen/X86/combine-sra.ll | 56 +- test/CodeGen/X86/combine-srem.ll | 18 +- test/CodeGen/X86/combine-srl.ll | 80 +- test/CodeGen/X86/combine-sse41-intrinsics.ll | 18 +- test/CodeGen/X86/combine-sub.ll | 60 +- test/CodeGen/X86/combine-testm-and.ll | 8 +- test/CodeGen/X86/combine-udiv.ll | 36 +- test/CodeGen/X86/combine-urem.ll | 42 +- test/CodeGen/X86/commute-3dnow.ll | 36 +- test/CodeGen/X86/commute-blend-avx2.ll | 16 +- test/CodeGen/X86/commute-blend-sse41.ll | 6 +- test/CodeGen/X86/commute-clmul.ll | 16 +- test/CodeGen/X86/commute-fcmp.ll | 192 +- test/CodeGen/X86/commute-vpclmulqdq-avx.ll | 6 +- test/CodeGen/X86/commute-vpclmulqdq-avx512.ll | 18 +- test/CodeGen/X86/commute-xop.ll | 80 +- test/CodeGen/X86/complex-fastmath.ll | 24 +- test/CodeGen/X86/compress_expand.ll | 70 +- test/CodeGen/X86/computeKnownBits_urem.ll | 4 +- test/CodeGen/X86/conditional-indecrement.ll | 18 +- .../X86/conditional-tailcall-samedest.mir | 20 +- test/CodeGen/X86/constant-combines.ll | 2 +- .../X86/copysign-constant-magnitude.ll | 16 +- test/CodeGen/X86/critical-edge-split-2.ll | 4 +- test/CodeGen/X86/ctpop-combine.ll | 8 +- test/CodeGen/X86/cvtv2f32.ll | 8 +- test/CodeGen/X86/dag-fmf-cse.ll | 2 +- 
test/CodeGen/X86/dag-merge-fast-accesses.ll | 12 +- test/CodeGen/X86/dagcombine-buildvector.ll | 4 +- test/CodeGen/X86/dagcombine-cse.ll | 4 +- test/CodeGen/X86/debugloc-no-line-0.ll | 2 +- test/CodeGen/X86/div-rem-simplify.ll | 40 +- test/CodeGen/X86/divide-by-constant.ll | 56 +- test/CodeGen/X86/divrem.ll | 32 +- test/CodeGen/X86/divrem8_ext.ll | 36 +- test/CodeGen/X86/domain-reassignment.mir | 16 +- test/CodeGen/X86/exedeps-movq.ll | 16 +- test/CodeGen/X86/exedepsfix-broadcast.ll | 16 +- test/CodeGen/X86/extract-store.ll | 202 +- test/CodeGen/X86/extractelement-index.ll | 150 +- ...ractelement-legalization-store-ordering.ll | 2 +- test/CodeGen/X86/extractelement-load.ll | 24 +- test/CodeGen/X86/f16c-intrinsics-fast-isel.ll | 24 +- test/CodeGen/X86/f16c-intrinsics.ll | 96 +- test/CodeGen/X86/f16c-schedule.ll | 56 +- test/CodeGen/X86/fadd-combines.ll | 36 +- test/CodeGen/X86/fast-isel-cmp.ll | 320 +- test/CodeGen/X86/fast-isel-constpool.ll | 16 +- test/CodeGen/X86/fast-isel-fptrunc-fpext.ll | 24 +- .../fast-isel-int-float-conversion-x86-64.ll | 24 +- .../X86/fast-isel-int-float-conversion.ll | 48 +- test/CodeGen/X86/fast-isel-load-i1.ll | 4 +- test/CodeGen/X86/fast-isel-nontemporal.ll | 338 +- test/CodeGen/X86/fast-isel-select-cmov.ll | 12 +- test/CodeGen/X86/fast-isel-select-cmov2.ll | 84 +- .../X86/fast-isel-select-pseudo-cmov.ll | 96 +- test/CodeGen/X86/fast-isel-select-sse.ll | 144 +- test/CodeGen/X86/fast-isel-sext-zext.ll | 80 +- test/CodeGen/X86/fast-isel-shift.ll | 72 +- test/CodeGen/X86/fast-isel-store.ll | 184 +- test/CodeGen/X86/fast-isel-vecload.ll | 278 +- test/CodeGen/X86/fastisel-softfloat.ll | 2 +- test/CodeGen/X86/fdiv-combine.ll | 12 +- test/CodeGen/X86/fdiv.ll | 12 +- test/CodeGen/X86/fixup-bw-copy.ll | 18 +- test/CodeGen/X86/fma-commute-x86.ll | 144 +- test/CodeGen/X86/fma-fneg-combine.ll | 54 +- test/CodeGen/X86/fma-intrinsics-x86.ll | 240 +- test/CodeGen/X86/fma-scalar-memfold.ll | 32 +- test/CodeGen/X86/fma-schedule.ll | 448 +-- test/CodeGen/X86/fma.ll | 96 +- test/CodeGen/X86/fma4-commute-x86.ll | 108 +- test/CodeGen/X86/fma4-fneg-combine.ll | 20 +- test/CodeGen/X86/fma4-intrinsics-x86.ll | 56 +- .../X86/fma4-intrinsics-x86_64-folded-load.ll | 20 +- test/CodeGen/X86/fma4-scalar-memfold.ll | 8 +- test/CodeGen/X86/fma_patterns.ll | 432 +-- test/CodeGen/X86/fma_patterns_wide.ll | 282 +- test/CodeGen/X86/fmaddsub-combine.ll | 28 +- test/CodeGen/X86/fmf-flags.ll | 16 +- test/CodeGen/X86/fmsubadd-combine.ll | 30 +- test/CodeGen/X86/fold-load-binops.ll | 32 +- test/CodeGen/X86/fold-load-unops.ll | 48 +- test/CodeGen/X86/fold-rmw-ops.ll | 304 +- test/CodeGen/X86/fold-vector-sext-crash.ll | 2 +- test/CodeGen/X86/fold-vector-sext-crash2.ll | 16 +- test/CodeGen/X86/fold-vector-sext-zext.ll | 80 +- test/CodeGen/X86/fp-fast.ll | 22 +- test/CodeGen/X86/fp-load-trunc.ll | 16 +- test/CodeGen/X86/fp-logic-replace.ll | 24 +- test/CodeGen/X86/fp-logic.ll | 42 +- test/CodeGen/X86/fp-select-cmp-and.ll | 36 +- test/CodeGen/X86/fp-trunc.ll | 16 +- test/CodeGen/X86/fp-une-cmp.ll | 8 +- test/CodeGen/X86/fp128-cast.ll | 12 +- test/CodeGen/X86/fp128-i128.ll | 30 +- test/CodeGen/X86/fp128-select.ll | 6 +- test/CodeGen/X86/gfni-intrinsics.ll | 6 +- test/CodeGen/X86/gpr-to-mask.ll | 40 +- test/CodeGen/X86/haddsub-2.ll | 124 +- test/CodeGen/X86/haddsub-shuf.ll | 32 +- test/CodeGen/X86/haddsub-undef.ll | 76 +- test/CodeGen/X86/haddsub.ll | 96 +- test/CodeGen/X86/half.ll | 106 +- test/CodeGen/X86/horizontal-reduce-smax.ll | 196 +- test/CodeGen/X86/horizontal-reduce-smin.ll | 196 +- 
test/CodeGen/X86/horizontal-reduce-umax.ll | 196 +- test/CodeGen/X86/horizontal-reduce-umin.ll | 196 +- test/CodeGen/X86/horizontal-shuffle.ll | 96 +- test/CodeGen/X86/i256-add.ll | 8 +- test/CodeGen/X86/i64-mem-copy.ll | 10 +- test/CodeGen/X86/i64-to-float.ll | 40 +- test/CodeGen/X86/iabs.ll | 20 +- .../CodeGen/X86/illegal-bitfield-loadstore.ll | 24 +- test/CodeGen/X86/immediate_merging.ll | 16 +- test/CodeGen/X86/immediate_merging64.ll | 4 +- test/CodeGen/X86/implicit-null-checks.mir | 134 +- test/CodeGen/X86/imul-lea-2.ll | 4 +- test/CodeGen/X86/imul-lea.ll | 2 +- test/CodeGen/X86/imul.ll | 36 +- test/CodeGen/X86/inline-0bh.ll | 2 +- test/CodeGen/X86/inline-asm-fpstack.ll | 46 +- test/CodeGen/X86/inline-sse.ll | 4 +- .../X86/insert-into-constant-vector.ll | 112 +- test/CodeGen/X86/insertelement-duplicates.ll | 8 +- test/CodeGen/X86/insertelement-ones.ll | 106 +- test/CodeGen/X86/insertelement-shuffle.ll | 24 +- test/CodeGen/X86/insertelement-zero.ll | 128 +- test/CodeGen/X86/insertps-combine.ll | 60 +- .../CodeGen/X86/insertps-from-constantpool.ll | 4 +- test/CodeGen/X86/insertps-unfold-load-bug.ll | 4 +- test/CodeGen/X86/jump_sign.ll | 58 +- test/CodeGen/X86/known-bits-vector.ll | 124 +- test/CodeGen/X86/known-bits.ll | 20 +- test/CodeGen/X86/known-signbits-vector.ll | 44 +- test/CodeGen/X86/lea-3.ll | 24 +- test/CodeGen/X86/lea-opt-cse1.ll | 4 +- test/CodeGen/X86/lea-opt-cse2.ll | 8 +- test/CodeGen/X86/lea-opt-cse3.ll | 24 +- test/CodeGen/X86/lea-opt-cse4.ll | 12 +- test/CodeGen/X86/lea32-schedule.ll | 198 +- test/CodeGen/X86/lea64-schedule.ll | 198 +- test/CodeGen/X86/legalize-shift-64.ll | 20 +- test/CodeGen/X86/legalize-shl-vec.ll | 12 +- test/CodeGen/X86/live-out-reg-info.ll | 4 +- test/CodeGen/X86/load-combine.ll | 148 +- test/CodeGen/X86/logical-load-fold.ll | 8 +- test/CodeGen/X86/longlong-deadload.ll | 2 +- test/CodeGen/X86/loop-search.ll | 10 +- test/CodeGen/X86/lower-bitcast.ll | 36 +- test/CodeGen/X86/lower-vec-shift-2.ll | 32 +- test/CodeGen/X86/lower-vec-shift.ll | 48 +- test/CodeGen/X86/lower-vec-shuffle-bug.ll | 8 +- test/CodeGen/X86/lwp-intrinsics-x86_64.ll | 8 +- test/CodeGen/X86/lwp-intrinsics.ll | 24 +- test/CodeGen/X86/lzcnt-schedule.ll | 36 +- test/CodeGen/X86/lzcnt-zext-cmp.ll | 40 +- test/CodeGen/X86/machine-combiner-int-vec.ll | 18 +- test/CodeGen/X86/machine-combiner-int.ll | 24 +- test/CodeGen/X86/machine-combiner.ll | 100 +- test/CodeGen/X86/machine-cp.ll | 12 +- test/CodeGen/X86/machine-cse.ll | 24 +- test/CodeGen/X86/machine-region-info.mir | 16 +- test/CodeGen/X86/madd.ll | 36 +- test/CodeGen/X86/mask-negated-bool.ll | 12 +- test/CodeGen/X86/masked_gather_scatter.ll | 422 +- test/CodeGen/X86/masked_memop.ll | 266 +- test/CodeGen/X86/memcmp-minsize.ll | 124 +- test/CodeGen/X86/memcmp-optsize.ll | 184 +- test/CodeGen/X86/memcmp.ll | 216 +- test/CodeGen/X86/memset-2.ll | 8 +- test/CodeGen/X86/memset-nonzero.ll | 70 +- test/CodeGen/X86/memset.ll | 12 +- test/CodeGen/X86/memset64-on-x86-32.ll | 6 +- .../X86/merge-consecutive-loads-128.ll | 226 +- .../X86/merge-consecutive-loads-256.ll | 112 +- .../X86/merge-consecutive-loads-512.ll | 110 +- test/CodeGen/X86/merge-consecutive-stores.ll | 2 +- test/CodeGen/X86/merge-store-constants.ll | 16 +- .../X86/merge-store-partially-alias-loads.ll | 2 +- test/CodeGen/X86/merge_store.ll | 6 +- .../X86/merge_store_duplicated_loads.ll | 6 +- test/CodeGen/X86/mfence.ll | 8 +- test/CodeGen/X86/misched-copy.ll | 2 +- test/CodeGen/X86/mmx-arg-passing-x86-64.ll | 6 +- test/CodeGen/X86/mmx-arg-passing.ll | 8 +- 
test/CodeGen/X86/mmx-bitcast.ll | 14 +- test/CodeGen/X86/mmx-coalescing.ll | 2 +- test/CodeGen/X86/mmx-cvt.ll | 40 +- test/CodeGen/X86/mmx-fold-load.ll | 72 +- test/CodeGen/X86/mmx-schedule.ll | 1760 ++++----- test/CodeGen/X86/movbe-schedule.ll | 48 +- test/CodeGen/X86/movgs.ll | 16 +- test/CodeGen/X86/movmsk.ll | 14 +- test/CodeGen/X86/mul-constant-i16.ll | 132 +- test/CodeGen/X86/mul-constant-i32.ll | 528 +-- test/CodeGen/X86/mul-constant-i64.ll | 528 +-- test/CodeGen/X86/mul-constant-result.ll | 16 +- test/CodeGen/X86/mul-i1024.ll | 4 +- test/CodeGen/X86/mul-i256.ll | 4 +- test/CodeGen/X86/mul-i512.ll | 4 +- test/CodeGen/X86/mul128.ll | 4 +- test/CodeGen/X86/mul64.ll | 4 +- test/CodeGen/X86/mulvi32.ll | 58 +- test/CodeGen/X86/mulx32.ll | 4 +- test/CodeGen/X86/mulx64.ll | 4 +- test/CodeGen/X86/neg_cmp.ll | 8 +- test/CodeGen/X86/negate-i1.ll | 32 +- test/CodeGen/X86/negate-shift.ll | 8 +- test/CodeGen/X86/negate.ll | 12 +- test/CodeGen/X86/negative-sin.ll | 12 +- test/CodeGen/X86/no-sse2-avg.ll | 2 +- test/CodeGen/X86/nontemporal-2.ll | 326 +- test/CodeGen/X86/nontemporal-loads.ll | 478 +-- test/CodeGen/X86/nontemporal.ll | 8 +- test/CodeGen/X86/nosse-vector.ll | 28 +- test/CodeGen/X86/not-and-simplify.ll | 8 +- test/CodeGen/X86/oddshuffles.ll | 184 +- test/CodeGen/X86/optimize-max-1.ll | 16 +- test/CodeGen/X86/optimize-max-2.ll | 4 +- test/CodeGen/X86/or-branch.ll | 18 +- test/CodeGen/X86/or-lea.ll | 16 +- .../X86/overflow-intrinsic-setcc-fold.ll | 24 +- test/CodeGen/X86/overflow.ll | 4 +- test/CodeGen/X86/packss.ll | 20 +- test/CodeGen/X86/palignr.ll | 50 +- test/CodeGen/X86/pause.ll | 2 +- test/CodeGen/X86/peep-setb.ll | 18 +- test/CodeGen/X86/peep-test-4.ll | 54 +- test/CodeGen/X86/peephole-cvt-sse.ll | 8 +- .../X86/peephole-na-phys-copy-folding.ll | 72 +- test/CodeGen/X86/peephole-recurrence.mir | 64 +- test/CodeGen/X86/phaddsub.ll | 64 +- test/CodeGen/X86/pku.ll | 4 +- test/CodeGen/X86/pmovsx-inreg.ll | 84 +- test/CodeGen/X86/pmul.ll | 160 +- test/CodeGen/X86/pointer-vector.ll | 24 +- test/CodeGen/X86/popcnt-schedule.ll | 48 +- test/CodeGen/X86/popcnt.ll | 32 +- test/CodeGen/X86/post-ra-sched.ll | 2 +- test/CodeGen/X86/powi.ll | 6 +- test/CodeGen/X86/pr11334.ll | 20 +- test/CodeGen/X86/pr11985.ll | 4 +- test/CodeGen/X86/pr12312.ll | 48 +- test/CodeGen/X86/pr13577.ll | 4 +- test/CodeGen/X86/pr14161.ll | 4 +- test/CodeGen/X86/pr14204.ll | 2 +- test/CodeGen/X86/pr14314.ll | 4 +- test/CodeGen/X86/pr15267.ll | 8 +- test/CodeGen/X86/pr15309.ll | 2 +- test/CodeGen/X86/pr15705.ll | 10 +- test/CodeGen/X86/pr15981.ll | 14 +- test/CodeGen/X86/pr16031.ll | 2 +- test/CodeGen/X86/pr16360.ll | 2 +- test/CodeGen/X86/pr17764.ll | 2 +- test/CodeGen/X86/pr18014.ll | 2 +- test/CodeGen/X86/pr18344.ll | 4 +- test/CodeGen/X86/pr20011.ll | 4 +- test/CodeGen/X86/pr20012.ll | 4 +- test/CodeGen/X86/pr21792.ll | 2 +- test/CodeGen/X86/pr22338.ll | 8 +- test/CodeGen/X86/pr22774.ll | 2 +- test/CodeGen/X86/pr22970.ll | 8 +- test/CodeGen/X86/pr23603.ll | 4 +- test/CodeGen/X86/pr24602.ll | 2 +- test/CodeGen/X86/pr2585.ll | 4 +- test/CodeGen/X86/pr26350.ll | 2 +- test/CodeGen/X86/pr2656.ll | 4 +- test/CodeGen/X86/pr27591.ll | 4 +- test/CodeGen/X86/pr28129.ll | 16 +- test/CodeGen/X86/pr28173.ll | 10 +- test/CodeGen/X86/pr28472.ll | 2 +- test/CodeGen/X86/pr29061.ll | 4 +- test/CodeGen/X86/pr29112.ll | 2 +- test/CodeGen/X86/pr29170.ll | 6 +- test/CodeGen/X86/pr30284.ll | 2 +- test/CodeGen/X86/pr30430.ll | 2 +- test/CodeGen/X86/pr30511.ll | 2 +- test/CodeGen/X86/pr31045.ll | 2 +- test/CodeGen/X86/pr31088.ll | 12 +- 
test/CodeGen/X86/pr31323.ll | 4 +- test/CodeGen/X86/pr31773.ll | 8 +- test/CodeGen/X86/pr31956.ll | 2 +- test/CodeGen/X86/pr32108.ll | 2 +- test/CodeGen/X86/pr32241.ll | 6 +- test/CodeGen/X86/pr32256.ll | 2 +- test/CodeGen/X86/pr32282.ll | 6 +- test/CodeGen/X86/pr32284.ll | 32 +- test/CodeGen/X86/pr32329.ll | 4 +- test/CodeGen/X86/pr32340.ll | 2 +- test/CodeGen/X86/pr32345.ll | 12 +- test/CodeGen/X86/pr32368.ll | 24 +- test/CodeGen/X86/pr32420.ll | 2 +- test/CodeGen/X86/pr32451.ll | 2 +- test/CodeGen/X86/pr32484.ll | 2 +- test/CodeGen/X86/pr32659.ll | 2 +- test/CodeGen/X86/pr32907.ll | 8 +- test/CodeGen/X86/pr33290.ll | 4 +- test/CodeGen/X86/pr33349.ll | 4 +- test/CodeGen/X86/pr33828.ll | 8 +- test/CodeGen/X86/pr33844.ll | 2 +- test/CodeGen/X86/pr33960.ll | 4 +- test/CodeGen/X86/pr34080.ll | 8 +- test/CodeGen/X86/pr34088.ll | 2 +- test/CodeGen/X86/pr34137.ll | 2 +- test/CodeGen/X86/pr34139.ll | 2 +- test/CodeGen/X86/pr34149.ll | 6 +- test/CodeGen/X86/pr34177.ll | 2 +- test/CodeGen/X86/pr34271-1.ll | 2 +- test/CodeGen/X86/pr34271.ll | 2 +- test/CodeGen/X86/pr34381.ll | 2 +- test/CodeGen/X86/pr34421.ll | 4 +- test/CodeGen/X86/pr34605.ll | 2 +- test/CodeGen/X86/pr34629.ll | 4 +- test/CodeGen/X86/pr34634.ll | 4 +- test/CodeGen/X86/pr34653.ll | 2 +- test/CodeGen/X86/pr34657.ll | 2 +- test/CodeGen/X86/pr34855.ll | 4 +- test/CodeGen/X86/pr35272.ll | 2 +- test/CodeGen/X86/pr35399.ll | 2 +- test/CodeGen/X86/pr35443.ll | 2 +- test/CodeGen/X86/pre-coalesce.mir | 10 +- test/CodeGen/X86/promote-vec3.ll | 16 +- test/CodeGen/X86/pseudo_cmov_lower2.ll | 4 +- test/CodeGen/X86/pshufb-mask-comments.ll | 12 +- test/CodeGen/X86/psubus.ll | 260 +- test/CodeGen/X86/rdrand-x86_64.ll | 2 +- test/CodeGen/X86/rdrand.ll | 18 +- test/CodeGen/X86/rdseed-x86_64.ll | 2 +- test/CodeGen/X86/rdseed.ll | 8 +- test/CodeGen/X86/recip-fastmath.ll | 162 +- test/CodeGen/X86/recip-fastmath2.ll | 216 +- test/CodeGen/X86/recip-pic.ll | 2 +- test/CodeGen/X86/reduce-trunc-shl.ll | 32 +- test/CodeGen/X86/rem.ll | 10 +- .../CodeGen/X86/replace-load-and-with-bzhi.ll | 16 +- test/CodeGen/X86/ret-mmx.ll | 8 +- test/CodeGen/X86/rot16.ll | 32 +- test/CodeGen/X86/rot32.ll | 36 +- test/CodeGen/X86/rot64.ll | 36 +- test/CodeGen/X86/rotate.ll | 120 +- test/CodeGen/X86/rotate4.ll | 32 +- test/CodeGen/X86/rotate_vec.ll | 8 +- test/CodeGen/X86/rounding-ops.ll | 60 +- test/CodeGen/X86/rtm.ll | 20 +- test/CodeGen/X86/sad.ll | 96 +- test/CodeGen/X86/sad_variations.ll | 42 +- test/CodeGen/X86/sandybridge-loads.ll | 4 +- test/CodeGen/X86/sar_fold.ll | 8 +- test/CodeGen/X86/sar_fold64.ll | 12 +- test/CodeGen/X86/sbb.ll | 32 +- test/CodeGen/X86/scalar-int-to-fp.ll | 124 +- test/CodeGen/X86/scatter-schedule.ll | 2 +- test/CodeGen/X86/schedule-x86_32.ll | 120 +- test/CodeGen/X86/schedule-x86_64.ll | 300 +- test/CodeGen/X86/select-mmx.ll | 12 +- test/CodeGen/X86/select-with-and-or.ll | 28 +- test/CodeGen/X86/select.ll | 202 +- test/CodeGen/X86/select_const.ll | 84 +- test/CodeGen/X86/setcc-combine.ll | 24 +- test/CodeGen/X86/setcc-logic.ll | 84 +- test/CodeGen/X86/setcc-lowering.ll | 12 +- test/CodeGen/X86/setcc-narrowing.ll | 2 +- test/CodeGen/X86/setcc-wide-types.ll | 16 +- test/CodeGen/X86/setcc.ll | 12 +- test/CodeGen/X86/sext-i1.ll | 32 +- test/CodeGen/X86/sext-setcc-self.ll | 12 +- test/CodeGen/X86/sha-schedule.ll | 56 +- test/CodeGen/X86/shift-and.ll | 38 +- test/CodeGen/X86/shift-bmi2.ll | 66 +- test/CodeGen/X86/shift-codegen.ll | 4 +- test/CodeGen/X86/shift-combine.ll | 28 +- test/CodeGen/X86/shift-double-x86_64.ll | 14 +- 
test/CodeGen/X86/shift-double.ll | 74 +- test/CodeGen/X86/shift-folding.ll | 10 +- test/CodeGen/X86/shift-pcmp.ll | 8 +- test/CodeGen/X86/shl-crash-on-legalize.ll | 2 +- test/CodeGen/X86/shrink-compare.ll | 56 +- test/CodeGen/X86/shrink_vmul.ll | 96 +- test/CodeGen/X86/shrink_vmul_sse.ll | 2 +- test/CodeGen/X86/shuffle-combine-crash-2.ll | 4 +- test/CodeGen/X86/shuffle-of-insert.ll | 48 +- .../CodeGen/X86/shuffle-of-splat-multiuses.ll | 16 +- .../X86/shuffle-strided-with-offset-128.ll | 216 +- .../X86/shuffle-strided-with-offset-256.ll | 172 +- .../X86/shuffle-strided-with-offset-512.ll | 122 +- test/CodeGen/X86/shuffle-vs-trunc-128.ll | 160 +- test/CodeGen/X86/shuffle-vs-trunc-256.ll | 142 +- test/CodeGen/X86/shuffle-vs-trunc-512.ll | 84 +- test/CodeGen/X86/sincos.ll | 12 +- test/CodeGen/X86/sink-blockfreq.ll | 2 +- test/CodeGen/X86/sink-out-of-loop.ll | 2 +- test/CodeGen/X86/slow-incdec.ll | 12 +- test/CodeGen/X86/slow-pmulld.ll | 16 +- test/CodeGen/X86/slow-unaligned-mem.ll | 4 +- test/CodeGen/X86/soft-fp-legal-in-HW-reg.ll | 2 +- test/CodeGen/X86/splat-for-size.ll | 40 +- test/CodeGen/X86/split-extend-vector-inreg.ll | 8 +- test/CodeGen/X86/split-store.ll | 34 +- test/CodeGen/X86/sqrt-fastmath-tune.ll | 12 +- test/CodeGen/X86/sqrt-fastmath.ll | 44 +- test/CodeGen/X86/sqrt-partial.ll | 8 +- test/CodeGen/X86/sse-align-12.ll | 8 +- test/CodeGen/X86/sse-fcopysign.ll | 24 +- test/CodeGen/X86/sse-fsignum.ll | 24 +- .../X86/sse-intrinsics-fast-isel-x86_64.ll | 6 +- test/CodeGen/X86/sse-intrinsics-fast-isel.ll | 432 +-- .../CodeGen/X86/sse-intrinsics-x86-upgrade.ll | 38 +- test/CodeGen/X86/sse-intrinsics-x86.ll | 166 +- test/CodeGen/X86/sse-intrinsics-x86_64.ll | 24 +- test/CodeGen/X86/sse-minmax.ll | 308 +- test/CodeGen/X86/sse-only.ll | 2 +- test/CodeGen/X86/sse-scalar-fp-arith-unary.ll | 16 +- test/CodeGen/X86/sse-scalar-fp-arith.ll | 284 +- test/CodeGen/X86/sse-schedule.ll | 980 ++--- test/CodeGen/X86/sse1.ll | 24 +- .../X86/sse2-intrinsics-fast-isel-x86_64.ll | 14 +- test/CodeGen/X86/sse2-intrinsics-fast-isel.ll | 844 ++-- .../X86/sse2-intrinsics-x86-upgrade.ll | 68 +- test/CodeGen/X86/sse2-intrinsics-x86.ll | 484 +-- test/CodeGen/X86/sse2-intrinsics-x86_64.ll | 24 +- test/CodeGen/X86/sse2-schedule.ll | 2500 ++++++------ test/CodeGen/X86/sse2-vector-shifts.ll | 74 +- test/CodeGen/X86/sse2.ll | 84 +- test/CodeGen/X86/sse3-avx-addsub-2.ll | 68 +- test/CodeGen/X86/sse3-avx-addsub.ll | 60 +- test/CodeGen/X86/sse3-intrinsics-fast-isel.ll | 44 +- test/CodeGen/X86/sse3-intrinsics-x86.ll | 32 +- test/CodeGen/X86/sse3-schedule.ll | 240 +- test/CodeGen/X86/sse3.ll | 72 +- .../CodeGen/X86/sse41-intrinsics-fast-isel.ll | 244 +- .../X86/sse41-intrinsics-x86-upgrade.ll | 56 +- test/CodeGen/X86/sse41-intrinsics-x86.ll | 142 +- test/CodeGen/X86/sse41-pmovxrm.ll | 48 +- test/CodeGen/X86/sse41-schedule.ll | 882 ++--- test/CodeGen/X86/sse41.ll | 232 +- .../X86/sse42-intrinsics-fast-isel-x86_64.ll | 4 +- .../CodeGen/X86/sse42-intrinsics-fast-isel.ll | 72 +- test/CodeGen/X86/sse42-intrinsics-x86.ll | 82 +- test/CodeGen/X86/sse42-intrinsics-x86_64.ll | 4 +- test/CodeGen/X86/sse42-schedule.ll | 198 +- .../CodeGen/X86/sse4a-intrinsics-fast-isel.ll | 24 +- test/CodeGen/X86/sse4a-schedule.ll | 36 +- test/CodeGen/X86/sse4a-upgrade.ll | 8 +- test/CodeGen/X86/sse4a.ll | 48 +- test/CodeGen/X86/sse_partial_update.ll | 14 +- .../CodeGen/X86/ssse3-intrinsics-fast-isel.ll | 68 +- test/CodeGen/X86/ssse3-intrinsics-x86.ll | 78 +- test/CodeGen/X86/ssse3-schedule.ll | 320 +- test/CodeGen/X86/stack-folding-bmi.ll | 4 +- 
test/CodeGen/X86/stack-folding-lwp.ll | 8 +- test/CodeGen/X86/stack-folding-tbm.ll | 4 +- test/CodeGen/X86/stack-protector-weight.ll | 12 +- test/CodeGen/X86/statepoint-live-in.ll | 12 +- test/CodeGen/X86/stores-merging.ll | 10 +- test/CodeGen/X86/subcarry.ll | 4 +- test/CodeGen/X86/subvector-broadcast.ll | 412 +- test/CodeGen/X86/switch-edge-weight.ll | 102 +- test/CodeGen/X86/switch-jump-table.ll | 6 +- .../CodeGen/X86/switch-lower-peel-top-case.ll | 20 +- test/CodeGen/X86/switch.ll | 4 +- test/CodeGen/X86/swizzle-2.ll | 84 +- test/CodeGen/X86/swizzle-avx2.ll | 14 +- .../X86/tbm-intrinsics-fast-isel-x86_64.ll | 20 +- test/CodeGen/X86/tbm-intrinsics-fast-isel.ll | 40 +- test/CodeGen/X86/tbm-intrinsics-x86_64.ll | 12 +- test/CodeGen/X86/tbm_patterns.ll | 136 +- test/CodeGen/X86/tls-pie.ll | 24 +- test/CodeGen/X86/tls-shrink-wrapping.ll | 2 +- test/CodeGen/X86/trunc-ext-ld-st.ll | 24 +- test/CodeGen/X86/trunc-store.ll | 4 +- test/CodeGen/X86/trunc-to-bool.ll | 18 +- test/CodeGen/X86/uint64-to-float.ll | 6 +- test/CodeGen/X86/uint_to_fp-2.ll | 4 +- test/CodeGen/X86/uint_to_fp-3.ll | 16 +- test/CodeGen/X86/uint_to_fp.ll | 4 +- test/CodeGen/X86/umul-with-overflow.ll | 12 +- test/CodeGen/X86/unaligned-32-byte-memops.ll | 60 +- test/CodeGen/X86/urem-i8-constant.ll | 2 +- test/CodeGen/X86/urem-power-of-two.ll | 24 +- test/CodeGen/X86/use-add-flags.ll | 20 +- test/CodeGen/X86/v2f32.ll | 20 +- test/CodeGen/X86/v4f32-immediate.ll | 4 +- test/CodeGen/X86/v8i1-masks.ll | 8 +- test/CodeGen/X86/vaargs.ll | 2 +- test/CodeGen/X86/vaes-intrinsics-avx-x86.ll | 2 +- .../CodeGen/X86/vaes-intrinsics-avx512-x86.ll | 8 +- .../X86/vaes-intrinsics-avx512vl-x86.ll | 16 +- test/CodeGen/X86/var-permute-128.ll | 26 +- test/CodeGen/X86/var-permute-256.ll | 48 +- test/CodeGen/X86/var-permute-512.ll | 16 +- test/CodeGen/X86/vec-copysign-avx512.ll | 24 +- test/CodeGen/X86/vec-copysign.ll | 16 +- test/CodeGen/X86/vec-trunc-store.ll | 4 +- test/CodeGen/X86/vec3.ll | 4 +- test/CodeGen/X86/vec_cast2.ll | 24 +- test/CodeGen/X86/vec_cmp_sint-128.ll | 168 +- test/CodeGen/X86/vec_cmp_uint-128.ll | 200 +- test/CodeGen/X86/vec_compare-sse4.ll | 12 +- test/CodeGen/X86/vec_ctbits.ll | 12 +- test/CodeGen/X86/vec_ext_inreg.ll | 24 +- test/CodeGen/X86/vec_extract-avx.ll | 32 +- test/CodeGen/X86/vec_extract-mmx.ll | 20 +- test/CodeGen/X86/vec_extract-sse4.ll | 16 +- test/CodeGen/X86/vec_extract.ll | 16 +- test/CodeGen/X86/vec_fabs.ll | 80 +- test/CodeGen/X86/vec_floor.ll | 216 +- test/CodeGen/X86/vec_fneg.ll | 16 +- test/CodeGen/X86/vec_fp_to_int.ll | 340 +- test/CodeGen/X86/vec_fpext.ll | 48 +- test/CodeGen/X86/vec_fptrunc.ll | 48 +- test/CodeGen/X86/vec_i64.ll | 8 +- test/CodeGen/X86/vec_ins_extract-1.ll | 16 +- test/CodeGen/X86/vec_insert-2.ll | 16 +- test/CodeGen/X86/vec_insert-3.ll | 4 +- test/CodeGen/X86/vec_insert-4.ll | 4 +- test/CodeGen/X86/vec_insert-5.ll | 36 +- test/CodeGen/X86/vec_insert-7.ll | 4 +- test/CodeGen/X86/vec_insert-8.ll | 8 +- test/CodeGen/X86/vec_insert-9.ll | 4 +- test/CodeGen/X86/vec_insert-mmx.ll | 16 +- test/CodeGen/X86/vec_int_to_fp.ll | 836 ++-- test/CodeGen/X86/vec_loadsingles.ll | 18 +- test/CodeGen/X86/vec_logical.ll | 20 +- test/CodeGen/X86/vec_minmax_match.ll | 36 +- test/CodeGen/X86/vec_minmax_sint.ll | 416 +- test/CodeGen/X86/vec_minmax_uint.ll | 416 +- test/CodeGen/X86/vec_partial.ll | 12 +- test/CodeGen/X86/vec_reassociate.ll | 40 +- test/CodeGen/X86/vec_return.ll | 4 +- test/CodeGen/X86/vec_sdiv_to_shift.ll | 32 +- test/CodeGen/X86/vec_set-2.ll | 8 +- test/CodeGen/X86/vec_set-3.ll | 12 +- 
test/CodeGen/X86/vec_set-4.ll | 8 +- test/CodeGen/X86/vec_set-6.ll | 4 +- test/CodeGen/X86/vec_set-7.ll | 4 +- test/CodeGen/X86/vec_set-8.ll | 4 +- test/CodeGen/X86/vec_set-A.ll | 4 +- test/CodeGen/X86/vec_set-B.ll | 8 +- test/CodeGen/X86/vec_set-C.ll | 4 +- test/CodeGen/X86/vec_set-D.ll | 2 +- test/CodeGen/X86/vec_set-F.ll | 4 +- test/CodeGen/X86/vec_set-H.ll | 2 +- test/CodeGen/X86/vec_set.ll | 4 +- test/CodeGen/X86/vec_setcc.ll | 44 +- test/CodeGen/X86/vec_shift.ll | 12 +- test/CodeGen/X86/vec_shift2.ll | 8 +- test/CodeGen/X86/vec_shift3.ll | 12 +- test/CodeGen/X86/vec_shift4.ll | 8 +- test/CodeGen/X86/vec_shift5.ll | 64 +- test/CodeGen/X86/vec_shift6.ll | 54 +- test/CodeGen/X86/vec_shift7.ll | 4 +- test/CodeGen/X86/vec_ss_load_fold.ll | 84 +- test/CodeGen/X86/vec_trunc_sext.ll | 4 +- test/CodeGen/X86/vec_uint_to_fp-fastmath.ll | 24 +- test/CodeGen/X86/vec_unsafe-fp-math.ll | 4 +- test/CodeGen/X86/vec_zero_cse.ll | 16 +- test/CodeGen/X86/vector-bitreverse.ll | 200 +- test/CodeGen/X86/vector-blend.ll | 234 +- test/CodeGen/X86/vector-compare-all_of.ll | 116 +- test/CodeGen/X86/vector-compare-any_of.ll | 116 +- test/CodeGen/X86/vector-compare-combines.ll | 8 +- test/CodeGen/X86/vector-compare-results.ll | 270 +- test/CodeGen/X86/vector-extend-inreg.ll | 8 +- test/CodeGen/X86/vector-half-conversions.ll | 230 +- test/CodeGen/X86/vector-idiv-sdiv-128.ll | 56 +- test/CodeGen/X86/vector-idiv-sdiv-256.ll | 36 +- test/CodeGen/X86/vector-idiv-sdiv-512.ll | 24 +- test/CodeGen/X86/vector-idiv-udiv-128.ll | 56 +- test/CodeGen/X86/vector-idiv-udiv-256.ll | 36 +- test/CodeGen/X86/vector-idiv-udiv-512.ll | 24 +- test/CodeGen/X86/vector-idiv.ll | 12 +- test/CodeGen/X86/vector-interleave.ll | 6 +- test/CodeGen/X86/vector-lzcnt-128.ll | 204 +- test/CodeGen/X86/vector-lzcnt-256.ll | 136 +- test/CodeGen/X86/vector-lzcnt-512.ll | 64 +- .../X86/vector-merge-store-fp-constants.ll | 4 +- test/CodeGen/X86/vector-mul.ll | 208 +- test/CodeGen/X86/vector-narrow-binop.ll | 12 +- test/CodeGen/X86/vector-pcmp.ll | 96 +- test/CodeGen/X86/vector-popcnt-128.ll | 104 +- test/CodeGen/X86/vector-popcnt-256.ll | 48 +- test/CodeGen/X86/vector-popcnt-512.ll | 36 +- test/CodeGen/X86/vector-rem.ll | 6 +- test/CodeGen/X86/vector-rotate-128.ll | 208 +- test/CodeGen/X86/vector-rotate-256.ll | 180 +- test/CodeGen/X86/vector-rotate-512.ll | 80 +- test/CodeGen/X86/vector-sext.ll | 586 +-- test/CodeGen/X86/vector-shift-ashr-128.ll | 248 +- test/CodeGen/X86/vector-shift-ashr-256.ll | 276 +- test/CodeGen/X86/vector-shift-ashr-512.ll | 52 +- test/CodeGen/X86/vector-shift-lshr-128.ll | 246 +- test/CodeGen/X86/vector-shift-lshr-256.ll | 276 +- test/CodeGen/X86/vector-shift-lshr-512.ll | 48 +- test/CodeGen/X86/vector-shift-shl-128.ll | 242 +- test/CodeGen/X86/vector-shift-shl-256.ll | 276 +- test/CodeGen/X86/vector-shift-shl-512.ll | 48 +- test/CodeGen/X86/vector-shuffle-128-v16.ll | 446 +-- test/CodeGen/X86/vector-shuffle-128-v2.ll | 426 +-- test/CodeGen/X86/vector-shuffle-128-v4.ll | 666 ++-- test/CodeGen/X86/vector-shuffle-128-v8.ll | 706 ++-- test/CodeGen/X86/vector-shuffle-256-v16.ll | 888 ++--- test/CodeGen/X86/vector-shuffle-256-v32.ll | 514 +-- test/CodeGen/X86/vector-shuffle-256-v4.ll | 404 +- test/CodeGen/X86/vector-shuffle-256-v8.ll | 528 +-- test/CodeGen/X86/vector-shuffle-512-v16.ll | 142 +- test/CodeGen/X86/vector-shuffle-512-v32.ll | 76 +- test/CodeGen/X86/vector-shuffle-512-v64.ll | 138 +- test/CodeGen/X86/vector-shuffle-512-v8.ll | 668 ++-- test/CodeGen/X86/vector-shuffle-avx512.ll | 192 +- 
.../X86/vector-shuffle-combining-avx.ll | 116 +- .../X86/vector-shuffle-combining-avx2.ll | 244 +- .../X86/vector-shuffle-combining-avx512bw.ll | 272 +- .../vector-shuffle-combining-avx512bwvl.ll | 24 +- .../vector-shuffle-combining-avx512vbmi.ll | 32 +- .../X86/vector-shuffle-combining-sse41.ll | 4 +- .../X86/vector-shuffle-combining-sse4a.ll | 18 +- .../X86/vector-shuffle-combining-ssse3.ll | 208 +- .../X86/vector-shuffle-combining-xop.ll | 112 +- test/CodeGen/X86/vector-shuffle-combining.ll | 726 ++-- test/CodeGen/X86/vector-shuffle-masked.ll | 276 +- test/CodeGen/X86/vector-shuffle-mmx.ll | 12 +- test/CodeGen/X86/vector-shuffle-sse1.ll | 56 +- test/CodeGen/X86/vector-shuffle-sse41.ll | 12 +- test/CodeGen/X86/vector-shuffle-sse4a.ll | 108 +- test/CodeGen/X86/vector-shuffle-v1.ll | 90 +- test/CodeGen/X86/vector-shuffle-v48.ll | 2 +- .../X86/vector-shuffle-variable-128.ll | 68 +- .../X86/vector-shuffle-variable-256.ll | 28 +- test/CodeGen/X86/vector-sqrt.ll | 4 +- test/CodeGen/X86/vector-trunc-math.ll | 664 ++-- test/CodeGen/X86/vector-trunc.ll | 374 +- test/CodeGen/X86/vector-tzcnt-128.ll | 258 +- test/CodeGen/X86/vector-tzcnt-256.ll | 192 +- test/CodeGen/X86/vector-tzcnt-512.ll | 80 +- test/CodeGen/X86/vector-unsigned-cmp.ll | 72 +- test/CodeGen/X86/vector-zext.ll | 494 +-- test/CodeGen/X86/vector-zmov.ll | 8 +- test/CodeGen/X86/viabs.ll | 174 +- test/CodeGen/X86/vmovq.ll | 4 +- test/CodeGen/X86/vpshufbitqbm-intrinsics.ll | 6 +- test/CodeGen/X86/vselect-2.ll | 28 +- test/CodeGen/X86/vselect-avx.ll | 14 +- test/CodeGen/X86/vselect-constants.ll | 56 +- test/CodeGen/X86/vselect-minmax.ll | 1632 ++++---- test/CodeGen/X86/vselect-packss.ll | 56 +- test/CodeGen/X86/vselect-pcmp.ll | 64 +- test/CodeGen/X86/vselect-zero.ll | 12 +- test/CodeGen/X86/vselect.ll | 148 +- test/CodeGen/X86/vshift-1.ll | 24 +- test/CodeGen/X86/vshift-2.ll | 24 +- test/CodeGen/X86/vshift-3.ll | 20 +- test/CodeGen/X86/vshift-4.ll | 28 +- test/CodeGen/X86/vshift-5.ll | 16 +- test/CodeGen/X86/vshift-6.ll | 4 +- test/CodeGen/X86/vsplit-and.ll | 4 +- test/CodeGen/X86/vzero-excess.ll | 8 +- test/CodeGen/X86/wide-fma-contraction.ll | 4 +- test/CodeGen/X86/wide-integer-cmp.ll | 22 +- test/CodeGen/X86/widen_arith-1.ll | 4 +- test/CodeGen/X86/widen_arith-2.ll | 4 +- test/CodeGen/X86/widen_arith-3.ll | 4 +- test/CodeGen/X86/widen_arith-4.ll | 4 +- test/CodeGen/X86/widen_arith-5.ll | 4 +- test/CodeGen/X86/widen_arith-6.ll | 4 +- test/CodeGen/X86/widen_bitops-0.ll | 48 +- test/CodeGen/X86/widen_bitops-1.ll | 48 +- test/CodeGen/X86/widen_cast-1.ll | 4 +- test/CodeGen/X86/widen_cast-2.ll | 2 +- test/CodeGen/X86/widen_cast-3.ll | 4 +- test/CodeGen/X86/widen_cast-4.ll | 8 +- test/CodeGen/X86/widen_cast-5.ll | 4 +- test/CodeGen/X86/widen_cast-6.ll | 4 +- test/CodeGen/X86/widen_compare-1.ll | 4 +- test/CodeGen/X86/widen_conv-1.ll | 12 +- test/CodeGen/X86/widen_conv-2.ll | 4 +- test/CodeGen/X86/widen_conv-3.ll | 14 +- test/CodeGen/X86/widen_conv-4.ll | 16 +- test/CodeGen/X86/widen_conversions.ll | 4 +- test/CodeGen/X86/widen_extract-1.ll | 4 +- test/CodeGen/X86/widen_load-0.ll | 4 +- test/CodeGen/X86/widen_load-2.ll | 44 +- test/CodeGen/X86/widen_load-3.ll | 16 +- test/CodeGen/X86/widen_shuffle-1.ll | 20 +- test/CodeGen/X86/widened-broadcast.ll | 168 +- test/CodeGen/X86/win64_sibcall.ll | 4 +- test/CodeGen/X86/win_chkstk.ll | 8 +- test/CodeGen/X86/win_coreclr_chkstk.ll | 4 +- test/CodeGen/X86/x32-cet-intrinsics.ll | 16 +- test/CodeGen/X86/x32-lea-1.ll | 2 +- test/CodeGen/X86/x64-cet-intrinsics.ll | 24 +- 
test/CodeGen/X86/x86-64-intrcc-nosse.ll | 2 +- test/CodeGen/X86/x86-fold-pshufb.ll | 4 +- test/CodeGen/X86/x86-interleaved-access.ll | 106 +- test/CodeGen/X86/x86-interleaved-check.ll | 2 +- .../x86-no_caller_saved_registers-preserve.ll | 2 +- .../X86/x86-setcc-int-to-fp-combine.ll | 10 +- test/CodeGen/X86/x86-shifts.ll | 64 +- test/CodeGen/X86/x86-shrink-wrapping.ll | 6 +- .../CodeGen/X86/x86-upgrade-avx-vbroadcast.ll | 6 +- .../X86/x86-upgrade-avx2-vbroadcast.ll | 2 +- test/CodeGen/X86/x87-schedule.ll | 940 ++--- test/CodeGen/X86/xaluo.ll | 294 +- test/CodeGen/X86/xchg-nofold.ll | 4 +- test/CodeGen/X86/xmulo.ll | 144 +- test/CodeGen/X86/xop-ifma.ll | 22 +- test/CodeGen/X86/xop-intrinsics-fast-isel.ll | 256 +- .../X86/xop-intrinsics-x86_64-upgrade.ll | 154 +- test/CodeGen/X86/xop-intrinsics-x86_64.ll | 166 +- test/CodeGen/X86/xop-mask-comments.ll | 48 +- test/CodeGen/X86/xop-pcmov.ll | 24 +- test/CodeGen/X86/xor-icmp.ll | 16 +- test/CodeGen/X86/xor-select-i1-combine.ll | 4 +- test/CodeGen/X86/zext-shl.ll | 4 +- test/CodeGen/X86/zext-trunc.ll | 2 +- test/DebugInfo/COFF/asan-module-ctor.ll | 2 +- test/DebugInfo/COFF/inlining-header.ll | 2 +- test/DebugInfo/COFF/local-variable-gap.ll | 2 +- test/DebugInfo/COFF/local-variables.ll | 4 +- test/DebugInfo/COFF/multifile.ll | 4 +- test/DebugInfo/COFF/multifunction.ll | 12 +- test/DebugInfo/COFF/pieces.ll | 2 +- test/DebugInfo/COFF/register-variables.ll | 4 +- test/DebugInfo/COFF/simple.ll | 4 +- .../MIR/X86/live-debug-values-3preds.mir | 4 +- test/DebugInfo/MIR/X86/live-debug-values.mir | 4 +- .../live-debug-vars-unused-arg-debugonly.mir | 8 +- test/DebugInfo/SystemZ/variable-loc.s | 2 +- .../DebugInfo/X86/dbg-value-transfer-order.ll | 2 +- test/DebugInfo/X86/live-debug-values.ll | 4 +- .../PowerPC/ppc32_elf_rel_addr16.s | 2 +- .../RuntimeDyld/X86/COFF_x86_64.s | 2 +- .../X86/ELF_x64-64_PIC_relocations.s | 2 +- .../RuntimeDyld/X86/ELF_x86_64_StubBuf.s | 2 +- .../AddressSanitizer/X86/asm_mov.s | 4 +- .../X86/asm_mov_no_instrumentation.s | 2 +- .../AddressSanitizer/X86/asm_swap_intel.s | 2 +- test/MC/AArch64/arm64-ilp32.s | 2 +- test/MC/AArch64/arm64-leaf-compact-unwind.s | 16 +- test/MC/AArch64/basic-pic.s | 10 +- test/MC/AArch64/elf-extern.s | 2 +- test/MC/AArch64/inline-asm-modifiers.s | 12 +- test/MC/AArch64/jump-table.s | 4 +- test/MC/ARM/2010-11-30-reloc-movt.s | 2 +- test/MC/ARM/elf-eflags-eabi.s | 2 +- test/MC/ARM/elf-movt.s | 2 +- test/MC/AsmParser/seh-directive-errors.s | 2 +- test/MC/COFF/basic-coff-64.s | 2 +- test/MC/COFF/basic-coff.s | 2 +- test/MC/COFF/cv-def-range.s | 2 +- test/MC/COFF/cv-empty-linetable.s | 2 +- .../MC/COFF/cv-inline-linetable-unreachable.s | 2 +- test/MC/COFF/cv-inline-linetable.s | 2 +- test/MC/COFF/diff.s | 2 +- test/MC/COFF/seh-linkonce.s | 2 +- test/MC/COFF/seh-section-2.s | 4 +- test/MC/COFF/simple-fixups.s | 6 +- test/MC/COFF/symbol-alias.s | 2 +- test/MC/COFF/symbol-fragment-offset-64.s | 2 +- test/MC/COFF/symbol-fragment-offset.s | 2 +- test/MC/COFF/weak.s | 4 +- test/MC/ELF/ARM/clang-section.s | 12 +- test/MC/ELF/basic-elf-32.s | 2 +- test/MC/ELF/basic-elf-64.s | 2 +- test/MC/ELF/call-abs.s | 2 +- test/MC/ELF/fde.s | 2 +- test/MC/MachO/debug_frame.s | 2 +- test/MC/Mips/do_switch1.s | 2 +- test/MC/Mips/do_switch2.s | 2 +- test/MC/Mips/do_switch3.s | 2 +- test/MC/Mips/elf-N64.s | 2 +- test/MC/Mips/elf-gprel-32-64.s | 2 +- test/MC/Mips/elf-relsym.s | 2 +- test/MC/Mips/elf-tls.s | 6 +- test/MC/Mips/mips_gprel16.s | 4 +- test/MC/Mips/r-mips-got-disp.s | 2 +- test/MC/Mips/xgot.s | 2 +- 
 test/MC/PowerPC/tls-gd-obj.s                  |    2 +-
 test/MC/PowerPC/tls-ie-obj.s                  |    2 +-
 test/MC/PowerPC/tls-ld-obj.s                  |    2 +-
 test/MC/X86/compact-unwind.s                  |    4 +-
 .../X86/Inputs/unprotected-fullinfo.s         |    8 +-
 .../X86/Inputs/unprotected-lineinfo.s         |    8 +-
 .../X86/Inputs/unprotected-nolineinfo.s       |    8 +-
 test/tools/llvm-dwarfdump/X86/brief.s         |    2 +-
 test/tools/llvm-dwarfdump/X86/debugloc.s      |    4 +-
 test/tools/llvm-dwarfdump/X86/lookup.s        |    2 +-
 .../llvm-dwarfdump/X86/verify_debug_info.s    |    2 +-
 .../llvm-dwarfdump/X86/verify_die_ranges.s    |    2 +-
 1435 files changed, 50355 insertions(+), 50304 deletions(-)

diff --git a/docs/MIRLangRef.rst b/docs/MIRLangRef.rst
index b4ca8f2347a..fff0d3ef0eb 100644
--- a/docs/MIRLangRef.rst
+++ b/docs/MIRLangRef.rst
@@ -246,13 +246,25 @@ blocks are referenced using the following syntax:
 
 .. code-block:: text
 
-    %bb.<id>[.<name>]
+    %bb.<id>
 
-Examples:
+Example:
 
 .. code-block:: llvm
 
     %bb.0
+
+The following syntax is also supported, but the former syntax is preferred for
+block references:
+
+.. code-block:: text
+
+    %bb.<id>[.<name>]
+
+Example:
+
+.. code-block:: llvm
+    %bb.1.then
 
 Successors
diff --git a/docs/NVPTXUsage.rst b/docs/NVPTXUsage.rst
index 159fe078653..38222afbc63 100644
--- a/docs/NVPTXUsage.rst
+++ b/docs/NVPTXUsage.rst
@@ -499,7 +499,7 @@ The output we get from ``llc`` (as of LLVM 3.4):
   .reg .s32      %r<2>;
   .reg .s64      %rl<8>;
 
-// BB#0:                                // %entry
+// %bb.0:                               // %entry
   ld.param.u64   %rl1, [kernel_param_0];
   mov.u32        %r1, %tid.x;
   mul.wide.s32   %rl2, %r1, 4;
@@ -897,7 +897,7 @@ This gives us the following PTX (excerpt):
   .reg .s32      %r<21>;
   .reg .s64      %rl<8>;
 
-// BB#0:                                // %entry
+// %bb.0:                               // %entry
   ld.param.u64   %rl2, [kernel_param_0];
   mov.u32        %r3, %tid.x;
   ld.param.u64   %rl3, [kernel_param_1];
@@ -921,7 +921,7 @@ This gives us the following PTX (excerpt):
   abs.f32        %f4, %f1;
   setp.gtu.f32   %p4, %f4, 0f7F800000;
   @%p4 bra       BB0_4;
-// BB#3:                                // %__nv_isnanf.exit5.i
+// %bb.3:                               // %__nv_isnanf.exit5.i
   abs.f32        %f5, %f2;
   setp.le.f32    %p5, %f5, 0f7F800000;
   @%p5 bra       BB0_5;
@@ -953,7 +953,7 @@ This gives us the following PTX (excerpt):
   selp.f32       %f110, 0f7F800000, %f99, %p16;
   setp.eq.f32    %p17, %f110, 0f7F800000;
   @%p17 bra      BB0_28;
-// BB#27:
+// %bb.27:
   fma.rn.f32     %f110, %f110, %f108, %f110;
 BB0_28:                                 // %__internal_accurate_powf.exit.i
   setp.lt.f32    %p18, %f1, 0f00000000;
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 0f5b04d9045..0730eb763e1 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -25,6 +25,7 @@
 #include "llvm/MC/LaneBitmask.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Printable.h"
 #include <cassert>
 #include <cstdint>
 #include <functional>
@@ -771,6 +772,14 @@ private:
 
 raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
 
+/// Prints a machine basic block reference.
+///
+/// The format is:
+///   %bb.5           - a machine basic block with MBB.getNumber() == 5.
+///
+/// Usage: OS << printMBBReference(MBB) << '\n';
+Printable printMBBReference(const MachineBasicBlock &MBB);
+
 // This is useful when building IndexedMaps keyed on basic block pointers.
struct MBB2NumberFunctor { using argument_type = const MachineBasicBlock *; diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 2d82f81ed70..1bc8b4eee0f 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2710,7 +2710,8 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const { (isBlockOnlyReachableByFallthrough(&MBB) && !MBB.isEHFuncletEntry())) { if (isVerbose()) { // NOTE: Want this comment at start of line, don't emit with AddComment. - OutStreamer->emitRawComment(" BB#" + Twine(MBB.getNumber()) + ":", false); + OutStreamer->emitRawComment(" %bb." + Twine(MBB.getNumber()) + ":", + false); } } else { OutStreamer->EmitLabel(MBB.getSymbol()); diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index d31260e767f..92e73cb502c 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -613,8 +613,8 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2, CommonTailLen = ComputeCommonTailLength(MBB1, MBB2, I1, I2); if (CommonTailLen == 0) return false; - DEBUG(dbgs() << "Common tail length of BB#" << MBB1->getNumber() - << " and BB#" << MBB2->getNumber() << " is " << CommonTailLen + DEBUG(dbgs() << "Common tail length of " << printMBBReference(*MBB1) + << " and " << printMBBReference(*MBB2) << " is " << CommonTailLen << '\n'); // It's almost always profitable to merge any number of non-terminator @@ -770,7 +770,7 @@ bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB, SameTails[commonTailIndex].getTailStartPos(); MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); - DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size " + DEBUG(dbgs() << "\nSplitting " << printMBBReference(*MBB) << ", size " << maxCommonTailLength); // If the split block unconditionally falls-thru to SuccBB, it will be @@ -920,20 +920,17 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB, bool MadeChange = false; DEBUG(dbgs() << "\nTryTailMergeBlocks: "; - for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) - dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber() - << (i == e-1 ? "" : ", "); - dbgs() << "\n"; - if (SuccBB) { - dbgs() << " with successor BB#" << SuccBB->getNumber() << '\n'; + for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) dbgs() + << printMBBReference(*MergePotentials[i].getBlock()) + << (i == e - 1 ? "" : ", "); + dbgs() << "\n"; if (SuccBB) { + dbgs() << " with successor " << printMBBReference(*SuccBB) << '\n'; if (PredBB) - dbgs() << " which has fall-through from BB#" - << PredBB->getNumber() << "\n"; - } - dbgs() << "Looking for common tails of at least " - << MinCommonTailLength << " instruction" - << (MinCommonTailLength == 1 ? "" : "s") << '\n'; - ); + dbgs() << " which has fall-through from " + << printMBBReference(*PredBB) << "\n"; + } dbgs() << "Looking for common tails of at least " + << MinCommonTailLength << " instruction" + << (MinCommonTailLength == 1 ? "" : "s") << '\n';); // Sort by hash value so that blocks with identical end sequences sort // together. @@ -1013,13 +1010,13 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB, // MBB is common tail. Adjust all other BB's to jump to this one. // Traversal must be forwards so erases work. 
- DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber() + DEBUG(dbgs() << "\nUsing common tail in " << printMBBReference(*MBB) << " for "); for (unsigned int i=0, e = SameTails.size(); i != e; ++i) { if (commonTailIndex == i) continue; - DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber() - << (i == e-1 ? "" : ", ")); + DEBUG(dbgs() << printMBBReference(*SameTails[i].getBlock()) + << (i == e - 1 ? "" : ", ")); // Hack the end off BB i, making it jump to BB commonTailIndex instead. replaceTailWithBranchTo(SameTails[i].getTailStartPos(), *MBB); // BB i is no longer a predecessor of SuccBB; remove it from the worklist. diff --git a/lib/CodeGen/BranchRelaxation.cpp b/lib/CodeGen/BranchRelaxation.cpp index 99fa4dc6791..0d87f142c7c 100644 --- a/lib/CodeGen/BranchRelaxation.cpp +++ b/lib/CodeGen/BranchRelaxation.cpp @@ -143,7 +143,7 @@ void BranchRelaxation::verify() { LLVM_DUMP_METHOD void BranchRelaxation::dumpBBs() { for (auto &MBB : *MF) { const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()]; - dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset) + dbgs() << format("%bb.%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset) << format("size=%#x\n", BBI.Size); } } @@ -287,13 +287,10 @@ bool BranchRelaxation::isBlockInRange( if (TII->isBranchOffsetInRange(MI.getOpcode(), DestOffset - BrOffset)) return true; - DEBUG( - dbgs() << "Out of range branch to destination BB#" << DestBB.getNumber() - << " from BB#" << MI.getParent()->getNumber() - << " to " << DestOffset - << " offset " << DestOffset - BrOffset - << '\t' << MI - ); + DEBUG(dbgs() << "Out of range branch to destination " + << printMBBReference(DestBB) << " from " + << printMBBReference(*MI.getParent()) << " to " << DestOffset + << " offset " << DestOffset - BrOffset << '\t' << MI); return false; } @@ -366,9 +363,9 @@ bool BranchRelaxation::fixupConditionalBranch(MachineInstr &MI) { // just created), so we can invert the condition. MachineBasicBlock &NextBB = *std::next(MachineFunction::iterator(MBB)); - DEBUG(dbgs() << " Insert B to BB#" << TBB->getNumber() - << ", invert condition and change dest. to BB#" - << NextBB.getNumber() << '\n'); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*TBB) + << ", invert condition and change dest. to " + << printMBBReference(NextBB) << '\n'); unsigned &MBBSize = BlockInfo[MBB->getNumber()].Size; diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp index bb181b7e165..461da8f138f 100644 --- a/lib/CodeGen/EarlyIfConversion.cpp +++ b/lib/CodeGen/EarlyIfConversion.cpp @@ -185,7 +185,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) { // Reject any live-in physregs. It's probably CPSR/EFLAGS, and very hard to // get right. 
if (!MBB->livein_empty()) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); return false; } @@ -199,7 +199,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) { continue; if (++InstrCount > BlockInstrLimit && !Stress) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than " + DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " << BlockInstrLimit << " instructions.\n"); return false; } @@ -246,7 +246,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) { if (!DefMI || DefMI->getParent() != Head) continue; if (InsertAfter.insert(DefMI).second) - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI); + DEBUG(dbgs() << printMBBReference(*MBB) << " depends on " << *DefMI); if (DefMI->isTerminator()) { DEBUG(dbgs() << "Can't insert instructions below terminator.\n"); return false; @@ -361,10 +361,10 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) { if (Succ1->pred_size() != 1 || Succ1->succ_size() != 1 || Succ1->succ_begin()[0] != Tail) return false; - DEBUG(dbgs() << "\nDiamond: BB#" << Head->getNumber() - << " -> BB#" << Succ0->getNumber() - << "/BB#" << Succ1->getNumber() - << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nDiamond: " << printMBBReference(*Head) << " -> " + << printMBBReference(*Succ0) << "/" + << printMBBReference(*Succ1) << " -> " + << printMBBReference(*Tail) << '\n'); // Live-in physregs are tricky to get right when speculating code. if (!Tail->livein_empty()) { @@ -372,9 +372,9 @@ bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) { return false; } } else { - DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() - << " -> BB#" << Succ0->getNumber() - << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " + << printMBBReference(*Succ0) << " -> " + << printMBBReference(*Tail) << '\n'); } // This is a triangle or a diamond. @@ -563,8 +563,8 @@ void SSAIfConv::convertIf(SmallVectorImpl &RemovedBlocks) { assert(Head->succ_empty() && "Additional head successors?"); if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) { // Splice Tail onto the end of Head. 
- DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber() - << " into head BB#" << Head->getNumber() << '\n'); + DEBUG(dbgs() << "Joining tail " << printMBBReference(*Tail) << " into head " + << printMBBReference(*Head) << '\n'); Head->splice(Head->end(), Tail, Tail->begin(), Tail->end()); Head->transferSuccessorsAndUpdatePHIs(Tail); diff --git a/lib/CodeGen/EdgeBundles.cpp b/lib/CodeGen/EdgeBundles.cpp index b3a25544be3..54c53eb1631 100644 --- a/lib/CodeGen/EdgeBundles.cpp +++ b/lib/CodeGen/EdgeBundles.cpp @@ -80,13 +80,15 @@ raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G, O << "digraph {\n"; for (const auto &MBB : *MF) { unsigned BB = MBB.getNumber(); - O << "\t\"BB#" << BB << "\" [ shape=box ]\n" - << '\t' << G.getBundle(BB, false) << " -> \"BB#" << BB << "\"\n" - << "\t\"BB#" << BB << "\" -> " << G.getBundle(BB, true) << '\n'; + O << "\t\"" << printMBBReference(MBB) << "\" [ shape=box ]\n" + << '\t' << G.getBundle(BB, false) << " -> \"" << printMBBReference(MBB) + << "\"\n" + << "\t\"" << printMBBReference(MBB) << "\" -> " << G.getBundle(BB, true) + << '\n'; for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI) - O << "\t\"BB#" << BB << "\" -> \"BB#" << (*SI)->getNumber() - << "\" [ color=lightgray ]\n"; + O << "\t\"" << printMBBReference(MBB) << "\" -> \"" + << printMBBReference(**SI) << "\" [ color=lightgray ]\n"; } O << "}\n"; return O; diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp index 73c4b6a145d..df51ecc0001 100644 --- a/lib/CodeGen/ExecutionDepsFix.cpp +++ b/lib/CodeGen/ExecutionDepsFix.cpp @@ -200,7 +200,7 @@ void ExecutionDepsFix::enterBasicBlock(MachineBasicBlock *MBB) { LiveRegs[rx].Def = -1; } } - DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": entry\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << ": entry\n"); return; } @@ -246,7 +246,7 @@ void ExecutionDepsFix::enterBasicBlock(MachineBasicBlock *MBB) { } } DEBUG( - dbgs() << "BB#" << MBB->getNumber() + dbgs() << printMBBReference(*MBB) << (!isBlockDone(MBB) ? ": incomplete\n" : ": all preds known\n")); } diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index 567461c1945..1bac5685ec5 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -406,12 +406,12 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) { case ICSimpleFalse: { bool isFalse = Kind == ICSimpleFalse; if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break; - DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ? - " false" : "") - << "): BB#" << BBI.BB->getNumber() << " (" - << ((Kind == ICSimpleFalse) - ? BBI.FalseBB->getNumber() - : BBI.TrueBB->getNumber()) << ") "); + DEBUG(dbgs() << "Ifcvt (Simple" + << (Kind == ICSimpleFalse ? " false" : "") + << "): " << printMBBReference(*BBI.BB) << " (" + << ((Kind == ICSimpleFalse) ? BBI.FalseBB->getNumber() + : BBI.TrueBB->getNumber()) + << ") "); RetVal = IfConvertSimple(BBI, Kind); DEBUG(dbgs() << (RetVal ? "succeeded!" 
: "failed!") << "\n"); if (RetVal) { @@ -435,9 +435,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << " false"); if (isRev) DEBUG(dbgs() << " rev"); - DEBUG(dbgs() << "): BB#" << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertTriangle(BBI, Kind); DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n"); if (RetVal) { @@ -453,9 +453,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) { } case ICDiamond: if (DisableDiamond) break; - DEBUG(dbgs() << "Ifcvt (Diamond): BB#" << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "Ifcvt (Diamond): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2, Token->TClobbersPred, Token->FClobbersPred); @@ -464,10 +464,9 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) { break; case ICForkedDiamond: if (DisableForkedDiamond) break; - DEBUG(dbgs() << "Ifcvt (Forked Diamond): BB#" - << BBI.BB->getNumber() << " (T:" - << BBI.TrueBB->getNumber() << ",F:" - << BBI.FalseBB->getNumber() << ") "); + DEBUG(dbgs() << "Ifcvt (Forked Diamond): " << printMBBReference(*BBI.BB) + << " (T:" << BBI.TrueBB->getNumber() + << ",F:" << BBI.FalseBB->getNumber() << ") "); RetVal = IfConvertForkedDiamond(BBI, Kind, NumDups, NumDups2, Token->TClobbersPred, Token->FClobbersPred); diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index 97bb7c712f6..5f0559b7d99 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -1174,7 +1174,7 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS, MachineFunction::iterator MBB = LIS.getMBBFromIndex(Start)->getIterator(); SlotIndex MBBEnd = LIS.getMBBEndIdx(&*MBB); - DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); + DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd); insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI); // This interval may span multiple basic blocks. // Insert a DBG_VALUE into each one. @@ -1184,7 +1184,7 @@ void UserValue::emitDebugValues(VirtRegMap *VRM, LiveIntervals &LIS, if (++MBB == MFEnd) break; MBBEnd = LIS.getMBBEndIdx(&*MBB); - DEBUG(dbgs() << " BB#" << MBB->getNumber() << '-' << MBBEnd); + DEBUG(dbgs() << ' ' << printMBBReference(*MBB) << '-' << MBBEnd); insertDebugValue(&*MBB, Start, Stop, Loc, Spilled, LIS, TII, TRI); } DEBUG(dbgs() << '\n'); diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index fb7fbe7f1c2..06807542b34 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -323,7 +323,7 @@ void LiveIntervals::computeLiveInRegUnits() { // Create phi-defs at Begin for all live-in registers. 
     SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
-    DEBUG(dbgs() << Begin << "\tBB#" << MBB.getNumber());
+    DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
     for (const auto &LI : MBB.liveins()) {
       for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
         unsigned Unit = *Units;
diff --git a/lib/CodeGen/LiveRangeCalc.cpp b/lib/CodeGen/LiveRangeCalc.cpp
index 0074a9fd907..352d75ec3ae 100644
--- a/lib/CodeGen/LiveRangeCalc.cpp
+++ b/lib/CodeGen/LiveRangeCalc.cpp
@@ -377,7 +377,7 @@ bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
       MBB->getParent()->verify();
       const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
       errs() << "The register " << printReg(PhysReg, TRI)
-             << " needs to be live in to BB#" << MBB->getNumber()
+             << " needs to be live in to " << printMBBReference(*MBB)
              << ", but is missing from the live-in list.\n";
       report_fatal_error("Invalid global physical register");
     }
diff --git a/lib/CodeGen/MIRParser/MILexer.cpp b/lib/CodeGen/MIRParser/MILexer.cpp
index d23df9c137b..ac696923794 100644
--- a/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/lib/CodeGen/MIRParser/MILexer.cpp
@@ -277,6 +277,9 @@ static Cursor maybeLexMachineBasicBlock(Cursor C, MIToken &Token,
     C.advance();
   StringRef Number = NumberRange.upto(C);
   unsigned StringOffset = PrefixLength + Number.size(); // Drop '%bb.'
+  // TODO: The format bb.<id>.<irname> is supported only when it's not a
+  // reference. Once we deprecate the format where the irname shows up, we
+  // should only lex forward if it is a reference.
   if (C.peek() == '.') {
     C.advance(); // Skip '.'
     ++StringOffset;
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index 9b5539a7ca7..de951e42c8f 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -1344,6 +1344,8 @@ bool MIParser::parseMBBReference(MachineBasicBlock *&MBB) {
     return error(Twine("use of undefined machine basic block #") +
                  Twine(Number));
   MBB = MBBInfo->second;
+  // TODO: Only parse the name if it's a MachineBasicBlockLabel. Deprecate once
+  // we drop the <irname> from the bb.<id>.<irname> format.
if (!Token.stringValue().empty() && Token.stringValue() != MBB->getName()) return error(Twine("the name of machine basic block #") + Twine(Number) + " isn't '" + Token.stringValue() + "'"); diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp index 92dc0aea421..aa0f38036b1 100644 --- a/lib/CodeGen/MIRPrinter.cpp +++ b/lib/CodeGen/MIRPrinter.cpp @@ -157,7 +157,6 @@ public: void print(const MachineBasicBlock &MBB); void print(const MachineInstr &MI); - void printMBBReference(const MachineBasicBlock &MBB); void printIRBlockReference(const BasicBlock &BB); void printIRValueReference(const Value &V); void printStackObjectReference(int FrameIndex); @@ -338,13 +337,11 @@ void MIRPrinter::convert(ModuleSlotTracker &MST, YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc(); if (MFI.getSavePoint()) { raw_string_ostream StrOS(YamlMFI.SavePoint.Value); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MFI.getSavePoint()); + StrOS << printMBBReference(*MFI.getSavePoint()); } if (MFI.getRestorePoint()) { raw_string_ostream StrOS(YamlMFI.RestorePoint.Value); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MFI.getRestorePoint()); + StrOS << printMBBReference(*MFI.getRestorePoint()); } } @@ -493,8 +490,7 @@ void MIRPrinter::convert(ModuleSlotTracker &MST, Entry.ID = ID++; for (const auto *MBB : Table.MBBs) { raw_string_ostream StrOS(Str); - MIPrinter(StrOS, MST, RegisterMaskIds, StackObjectOperandMapping) - .printMBBReference(*MBB); + StrOS << printMBBReference(*MBB); Entry.Blocks.push_back(StrOS.str()); Str.clear(); } @@ -616,7 +612,7 @@ void MIPrinter::print(const MachineBasicBlock &MBB) { for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) { if (I != MBB.succ_begin()) OS << ", "; - printMBBReference(**I); + OS << printMBBReference(**I); if (!SimplifyMIR || !canPredictProbs) OS << '(' << format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator()) @@ -764,14 +760,6 @@ void MIPrinter::print(const MachineInstr &MI) { } } -void MIPrinter::printMBBReference(const MachineBasicBlock &MBB) { - OS << "%bb." << MBB.getNumber(); - if (const auto *BB = MBB.getBasicBlock()) { - if (BB->hasName()) - OS << '.' << BB->getName(); - } -} - static void printIRSlotNumber(raw_ostream &OS, int Slot) { if (Slot == -1) OS << ""; @@ -967,7 +955,7 @@ void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx, Op.getFPImm()->printAsOperand(OS, /*PrintType=*/true, MST); break; case MachineOperand::MO_MachineBasicBlock: - printMBBReference(*Op.getMBB()); + OS << printMBBReference(*Op.getMBB()); break; case MachineOperand::MO_FrameIndex: printStackObjectReference(Op.getIndex()); diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index 8863ac23607..5522d6694e0 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -70,6 +70,10 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineBasicBlock &MBB) { return OS; } +Printable llvm::printMBBReference(const MachineBasicBlock &MBB) { + return Printable([&MBB](raw_ostream &OS) { return MBB.printAsOperand(OS); }); +} + /// When an MBB is added to an MF, we need to update the parent pointer of the /// MBB, the MBB numbering, and any instructions in the MBB to be on the right /// operand list for registers. 
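The new helper above delegates to MachineBasicBlock::printAsOperand through a
Printable, so the reference is only formatted when it is actually streamed and
no temporary string is built. A minimal usage sketch (dumpBlock is a
hypothetical caller, not part of this patch):

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/Support/Debug.h"

  // Streams "%bb.5" for a block whose getNumber() == 5, matching the MIR
  // block-reference syntax documented in MIRLangRef.rst.
  static void dumpBlock(const llvm::MachineBasicBlock &MBB) {
    llvm::dbgs() << llvm::printMBBReference(MBB) << '\n';
  }
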
@@ -281,7 +285,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, if (Indexes) OS << Indexes->getMBBStartIdx(this) << '\t'; - OS << "BB#" << getNumber() << ": "; + OS << printMBBReference(*this) << ": "; const char *Comma = ""; if (const BasicBlock *LBB = getBasicBlock()) { @@ -313,7 +317,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, if (Indexes) OS << '\t'; OS << " Predecessors according to CFG:"; for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI) - OS << " BB#" << (*PI)->getNumber(); + OS << " " << printMBBReference(*(*PI)); OS << '\n'; } @@ -334,7 +338,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, if (Indexes) OS << '\t'; OS << " Successors according to CFG:"; for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) { - OS << " BB#" << (*SI)->getNumber(); + OS << " " << printMBBReference(*(*SI)); if (!Probs.empty()) OS << '(' << *getProbabilityIterator(SI) << ')'; } @@ -350,7 +354,7 @@ void MachineBasicBlock::print(raw_ostream &OS, ModuleSlotTracker &MST, void MachineBasicBlock::printAsOperand(raw_ostream &OS, bool /*PrintType*/) const { - OS << "BB#" << getNumber(); + OS << "%bb." << getNumber(); } void MachineBasicBlock::removeLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) { @@ -767,10 +771,9 @@ MachineBasicBlock *MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock(); MF->insert(std::next(MachineFunction::iterator(this)), NMBB); - DEBUG(dbgs() << "Splitting critical edge:" - " BB#" << getNumber() - << " -- BB#" << NMBB->getNumber() - << " -- BB#" << Succ->getNumber() << '\n'); + DEBUG(dbgs() << "Splitting critical edge: " << printMBBReference(*this) + << " -- " << printMBBReference(*NMBB) << " -- " + << printMBBReference(*Succ) << '\n'); LiveIntervals *LIS = P.getAnalysisIfAvailable(); SlotIndexes *Indexes = P.getAnalysisIfAvailable(); @@ -1023,8 +1026,8 @@ bool MachineBasicBlock::canSplitCriticalEdge( // case that we can't handle. Since this never happens in properly optimized // code, just skip those edges. 
if (TBB && TBB == FBB) { - DEBUG(dbgs() << "Won't split critical edge after degenerate BB#" - << getNumber() << '\n'); + DEBUG(dbgs() << "Won't split critical edge after degenerate " + << printMBBReference(*this) << '\n'); return false; } return true; diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index f0285ea8f8e..87af9533b32 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -546,7 +546,7 @@ INITIALIZE_PASS_END(MachineBlockPlacement, DEBUG_TYPE, static std::string getBlockName(const MachineBasicBlock *BB) { std::string Result; raw_string_ostream OS(Result); - OS << "BB#" << BB->getNumber(); + OS << printMBBReference(*BB); OS << " ('" << BB->getName() << "')"; OS.flush(); return Result; diff --git a/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/lib/CodeGen/MachineBranchProbabilityInfo.cpp index 21eff9dfff9..e4952aaaba0 100644 --- a/lib/CodeGen/MachineBranchProbabilityInfo.cpp +++ b/lib/CodeGen/MachineBranchProbabilityInfo.cpp @@ -84,7 +84,7 @@ raw_ostream &MachineBranchProbabilityInfo::printEdgeProbability( const MachineBasicBlock *Dst) const { const BranchProbability Prob = getEdgeProbability(Src, Dst); - OS << "edge MBB#" << Src->getNumber() << " -> MBB#" << Dst->getNumber() + OS << "edge " << printMBBReference(*Src) << " -> " << printMBBReference(*Dst) << " probability is " << Prob << (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n"); diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index 1f55b8fa495..f0d5eec4dea 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -546,7 +546,7 @@ namespace llvm { raw_string_ostream OSS(OutStr); if (isSimple()) { - OSS << "BB#" << Node->getNumber(); + OSS << printMBBReference(*Node); if (const BasicBlock *BB = Node->getBasicBlock()) OSS << ": " << BB->getName(); } else @@ -908,7 +908,7 @@ void MachineJumpTableInfo::print(raw_ostream &OS) const { for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) { OS << " jt#" << i << ": "; for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j) - OS << " BB#" << JumpTables[i].MBBs[j]->getNumber(); + OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]); } OS << '\n'; diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index 3e622b4a23c..a251a08a516 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -563,8 +563,8 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) { // Now move the instructions to the predecessor, inserting it before any // terminator instructions. - DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#" - << MI->getParent()->getNumber() << ": " << *MI); + DEBUG(dbgs() << "Hoisting to " << printMBBReference(*Preheader) << " from " + << printMBBReference(*MI->getParent()) << ": " << *MI); // Splice the instruction to the preheader. MachineBasicBlock *MBB = MI->getParent(); @@ -601,14 +601,14 @@ bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) { } void MachineLICM::EnterScope(MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Entering BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Entering " << printMBBReference(*MBB) << '\n'); // Remember livein register pressure. 
BackTrace.push_back(RegPressure); } void MachineLICM::ExitScope(MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Exiting BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Exiting " << printMBBReference(*MBB) << '\n'); BackTrace.pop_back(); } @@ -1336,9 +1336,9 @@ bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) { DEBUG({ dbgs() << "Hoisting " << *MI; if (MI->getParent()->getBasicBlock()) - dbgs() << " from BB#" << MI->getParent()->getNumber(); + dbgs() << " from " << printMBBReference(*MI->getParent()); if (Preheader->getBasicBlock()) - dbgs() << " to BB#" << Preheader->getNumber(); + dbgs() << " to " << printMBBReference(*Preheader); dbgs() << "\n"; }); diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp index 2dbf57fc16a..acd83dde455 100644 --- a/lib/CodeGen/MachineOperand.cpp +++ b/lib/CodeGen/MachineOperand.cpp @@ -428,7 +428,7 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST, } break; case MachineOperand::MO_MachineBasicBlock: - OS << "getNumber() << ">"; + OS << printMBBReference(*getMBB()); break; case MachineOperand::MO_FrameIndex: OS << "'; diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index 6aaacb479fe..3e796cc3add 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -98,7 +98,7 @@ static cl::opt MISchedCutoff("misched-cutoff", cl::Hidden, static cl::opt SchedOnlyFunc("misched-only-func", cl::Hidden, cl::desc("Only schedule this function")); static cl::opt SchedOnlyBlock("misched-only-block", cl::Hidden, - cl::desc("Only schedule this MBB#")); + cl::desc("Only schedule this MBB#")); #else static bool ViewMISchedDAGs = false; #endif // NDEBUG @@ -548,15 +548,14 @@ void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler, continue; } DEBUG(dbgs() << "********** MI Scheduling **********\n"); - DEBUG(dbgs() << MF->getName() - << ":BB#" << MBB->getNumber() << " " << MBB->getName() - << "\n From: " << *I << " To: "; + DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB) << " " + << MBB->getName() << "\n From: " << *I << " To: "; if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); if (DumpCriticalPathLength) { errs() << MF->getName(); - errs() << ":BB# " << MBB->getNumber(); + errs() << ":%bb. " << MBB->getNumber(); errs() << " " << MBB->getName() << " \n"; } @@ -823,11 +822,11 @@ void ScheduleDAGMI::schedule() { placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); } /// Apply each ScheduleDAGMutation step in order. @@ -1261,11 +1260,11 @@ void ScheduleDAGMILive::schedule() { placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); } /// Build the DAG and setup three register pressure trackers. 
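Out-of-tree passes and targets that still assemble the old "BB#N" strings by
hand will want the same mechanical rewrite this patch applies in-tree. A
before/after sketch under that assumption (reportEdge is a hypothetical
out-of-tree helper, not code from this patch):

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/Support/Debug.h"

  static void reportEdge(const llvm::MachineBasicBlock &Src,
                         const llvm::MachineBasicBlock &Dst) {
    // Before: dbgs() << "BB#" << Src.getNumber()
    //                << " -> BB#" << Dst.getNumber() << '\n';
    // After: the output now matches the %bb.N form used by the MIR printer.
    llvm::dbgs() << llvm::printMBBReference(Src) << " -> "
                 << llvm::printMBBReference(Dst) << '\n';
  }
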
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp index 11257d98e37..7857084c4e6 100644 --- a/lib/CodeGen/MachineSink.cpp +++ b/lib/CodeGen/MachineSink.cpp @@ -243,17 +243,17 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg, // into and they are all PHI nodes. In this case, machine-sink must break // the critical edge first. e.g. // - // BB#1: derived from LLVM BB %bb4.preheader - // Predecessors according to CFG: BB#0 + // %bb.1: derived from LLVM BB %bb4.preheader + // Predecessors according to CFG: %bb.0 // ... // %reg16385 = DEC64_32r %reg16437, %eflags // ... - // JE_4 , %eflags - // Successors according to CFG: BB#37 BB#2 + // JE_4 <%bb.37>, %eflags + // Successors according to CFG: %bb.37 %bb.2 // - // BB#2: derived from LLVM BB %bb.nph - // Predecessors according to CFG: BB#0 BB#1 - // %reg16386 = PHI %reg16434, , %reg16385, + // %bb.2: derived from LLVM BB %bb.nph + // Predecessors according to CFG: %bb.0 %bb.1 + // %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1 BreakPHIEdge = true; for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) { MachineInstr *UseInst = MO.getParent(); @@ -321,10 +321,10 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { for (auto &Pair : ToSplit) { auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this); if (NewSucc != nullptr) { - DEBUG(dbgs() << " *** Splitting critical edge:" - " BB#" << Pair.first->getNumber() - << " -- BB#" << NewSucc->getNumber() - << " -- BB#" << Pair.second->getNumber() << '\n'); + DEBUG(dbgs() << " *** Splitting critical edge: " + << printMBBReference(*Pair.first) << " -- " + << printMBBReference(*NewSucc) << " -- " + << printMBBReference(*Pair.second) << '\n'); MadeChange = true; ++NumSplit; } else @@ -460,33 +460,33 @@ bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI, // It's not always legal to break critical edges and sink the computation // to the edge. // - // BB#1: + // %bb.1: // v1024 - // Beq BB#3 + // Beq %bb.3 // - // BB#2: + // %bb.2: // ... no uses of v1024 // - // BB#3: + // %bb.3: // ... // = v1024 // - // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted: + // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted: // - // BB#1: + // %bb.1: // ... - // Bne BB#2 - // BB#4: + // Bne %bb.2 + // %bb.4: // v1024 = - // B BB#3 - // BB#2: + // B %bb.3 + // %bb.2: // ... no uses of v1024 // - // BB#3: + // %bb.3: // ... // = v1024 // - // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3 + // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3 // flow. We need to ensure the new basic block where the computation is // sunk to dominates all the uses. // It's only legal to break critical edge and sink the computation to the diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp index 453b47b71f7..d81c6f8a31e 100644 --- a/lib/CodeGen/MachineTraceMetrics.cpp +++ b/lib/CodeGen/MachineTraceMetrics.cpp @@ -396,7 +396,8 @@ MachineTraceMetrics::getEnsemble(MachineTraceMetrics::Strategy strategy) { } void MachineTraceMetrics::invalidate(const MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Invalidate traces through BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Invalidate traces through " << printMBBReference(*MBB) + << '\n'); BlockInfo[MBB->getNumber()].invalidate(); for (unsigned i = 0; i != TS_NumStrategies; ++i) if (Ensembles[i]) @@ -476,8 +477,8 @@ public: /// Compute the trace through MBB. 
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) { - DEBUG(dbgs() << "Computing " << getName() << " trace through BB#" - << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Computing " << getName() << " trace through " + << printMBBReference(*MBB) << '\n'); // Set up loop bounds for the backwards post-order traversal. LoopBounds Bounds(BlockInfo, MTM.Loops); @@ -485,13 +486,13 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) { Bounds.Downward = false; Bounds.Visited.clear(); for (auto I : inverse_post_order_ext(MBB, Bounds)) { - DEBUG(dbgs() << " pred for BB#" << I->getNumber() << ": "); + DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": "); TraceBlockInfo &TBI = BlockInfo[I->getNumber()]; // All the predecessors have been visited, pick the preferred one. TBI.Pred = pickTracePred(I); DEBUG({ if (TBI.Pred) - dbgs() << "BB#" << TBI.Pred->getNumber() << '\n'; + dbgs() << printMBBReference(*TBI.Pred) << '\n'; else dbgs() << "null\n"; }); @@ -503,13 +504,13 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) { Bounds.Downward = true; Bounds.Visited.clear(); for (auto I : post_order_ext(MBB, Bounds)) { - DEBUG(dbgs() << " succ for BB#" << I->getNumber() << ": "); + DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": "); TraceBlockInfo &TBI = BlockInfo[I->getNumber()]; // All the successors have been visited, pick the preferred one. TBI.Succ = pickTraceSucc(I); DEBUG({ if (TBI.Succ) - dbgs() << "BB#" << TBI.Succ->getNumber() << '\n'; + dbgs() << printMBBReference(*TBI.Succ) << '\n'; else dbgs() << "null\n"; }); @@ -530,8 +531,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) { WorkList.push_back(BadMBB); do { const MachineBasicBlock *MBB = WorkList.pop_back_val(); - DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName() - << " height.\n"); + DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' ' + << getName() << " height.\n"); // Find any MBB predecessors that have MBB as their preferred successor. // They are the only ones that need to be invalidated. for (const MachineBasicBlock *Pred : MBB->predecessors()) { @@ -555,8 +556,8 @@ MachineTraceMetrics::Ensemble::invalidate(const MachineBasicBlock *BadMBB) { WorkList.push_back(BadMBB); do { const MachineBasicBlock *MBB = WorkList.pop_back_val(); - DEBUG(dbgs() << "Invalidate BB#" << MBB->getNumber() << ' ' << getName() - << " depth.\n"); + DEBUG(dbgs() << "Invalidate " << printMBBReference(*MBB) << ' ' + << getName() << " depth.\n"); // Find any MBB successors that have MBB as their preferred predecessor. // They are the only ones that need to be invalidated. for (const MachineBasicBlock *Succ : MBB->successors()) { @@ -859,7 +860,7 @@ computeInstrDepths(const MachineBasicBlock *MBB) { // Go through trace blocks in top-down order, stopping after the center block. 
while (!Stack.empty()) { MBB = Stack.pop_back_val(); - DEBUG(dbgs() << "\nDepths for BB#" << MBB->getNumber() << ":\n"); + DEBUG(dbgs() << "\nDepths for " << printMBBReference(*MBB) << ":\n"); TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()]; TBI.HasValidInstrDepths = true; TBI.CriticalPath = 0; @@ -1044,7 +1045,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) { SmallVector Deps; for (;!Stack.empty(); Stack.pop_back()) { MBB = Stack.back(); - DEBUG(dbgs() << "Heights for BB#" << MBB->getNumber() << ":\n"); + DEBUG(dbgs() << "Heights for " << printMBBReference(*MBB) << ":\n"); TraceBlockInfo &TBI = BlockInfo[MBB->getNumber()]; TBI.HasValidInstrHeights = true; TBI.CriticalPath = 0; @@ -1131,7 +1132,7 @@ computeInstrHeights(const MachineBasicBlock *MBB) { // Update virtual live-in heights. They were added by addLiveIns() with a 0 // height because the final height isn't known until now. - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " Live-ins:"); + DEBUG(dbgs() << printMBBReference(*MBB) << " Live-ins:"); for (LiveInReg &LIR : TBI.LiveIns) { const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg); LIR.Height = Heights.lookup(DefMI); @@ -1289,7 +1290,7 @@ bool MachineTraceMetrics::Trace::isDepInTrace(const MachineInstr &DefMI, void MachineTraceMetrics::Ensemble::print(raw_ostream &OS) const { OS << getName() << " ensemble:\n"; for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) { - OS << " BB#" << i << '\t'; + OS << " %bb." << i << '\t'; BlockInfo[i].print(OS); OS << '\n'; } @@ -1299,10 +1300,10 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const { if (hasValidDepth()) { OS << "depth=" << InstrDepth; if (Pred) - OS << " pred=BB#" << Pred->getNumber(); + OS << " pred=" << printMBBReference(*Pred); else OS << " pred=null"; - OS << " head=BB#" << Head; + OS << " head=%bb." << Head; if (HasValidInstrDepths) OS << " +instrs"; } else @@ -1311,10 +1312,10 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const { if (hasValidHeight()) { OS << "height=" << InstrHeight; if (Succ) - OS << " succ=BB#" << Succ->getNumber(); + OS << " succ=" << printMBBReference(*Succ); else OS << " succ=null"; - OS << " tail=BB#" << Tail; + OS << " tail=%bb." << Tail; if (HasValidInstrHeights) OS << " +instrs"; } else @@ -1326,18 +1327,18 @@ void MachineTraceMetrics::TraceBlockInfo::print(raw_ostream &OS) const { void MachineTraceMetrics::Trace::print(raw_ostream &OS) const { unsigned MBBNum = &TBI - &TE.BlockInfo[0]; - OS << TE.getName() << " trace BB#" << TBI.Head << " --> BB#" << MBBNum - << " --> BB#" << TBI.Tail << ':'; + OS << TE.getName() << " trace %bb." << TBI.Head << " --> %bb." << MBBNum + << " --> %bb." << TBI.Tail << ':'; if (TBI.hasValidHeight() && TBI.hasValidDepth()) OS << ' ' << getInstrCount() << " instrs."; if (TBI.HasValidInstrDepths && TBI.HasValidInstrHeights) OS << ' ' << TBI.CriticalPath << " cycles."; const MachineTraceMetrics::TraceBlockInfo *Block = &TBI; - OS << "\nBB#" << MBBNum; + OS << "\n%bb." 
<< MBBNum; while (Block->hasValidDepth() && Block->Pred) { unsigned Num = Block->Pred->getNumber(); - OS << " <- BB#" << Num; + OS << " <- " << printMBBReference(*Block->Pred); Block = &TE.BlockInfo[Num]; } @@ -1345,7 +1346,7 @@ void MachineTraceMetrics::Trace::print(raw_ostream &OS) const { OS << "\n "; while (Block->hasValidHeight() && Block->Succ) { unsigned Num = Block->Succ->getNumber(); - OS << " -> BB#" << Num; + OS << " -> " << printMBBReference(*Block->Succ); Block = &TE.BlockInfo[Num]; } OS << '\n'; diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index 2d138298a94..fd00a2d7501 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -471,9 +471,8 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) { void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { assert(MBB); report(msg, MBB->getParent()); - errs() << "- basic block: BB#" << MBB->getNumber() - << ' ' << MBB->getName() - << " (" << (const void*)MBB << ')'; + errs() << "- basic block: " << printMBBReference(*MBB) << ' ' + << MBB->getName() << " (" << (const void *)MBB << ')'; if (Indexes) errs() << " [" << Indexes->getMBBStartIdx(MBB) << ';' << Indexes->getMBBEndIdx(MBB) << ')'; @@ -619,8 +618,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { report("MBB has successor that isn't part of the function.", MBB); if (!MBBInfoMap[*I].Preds.count(MBB)) { report("Inconsistent CFG", MBB); - errs() << "MBB is not in the predecessor list of the successor BB#" - << (*I)->getNumber() << ".\n"; + errs() << "MBB is not in the predecessor list of the successor " + << printMBBReference(*(*I)) << ".\n"; } } @@ -631,8 +630,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { report("MBB has predecessor that isn't part of the function.", MBB); if (!MBBInfoMap[*I].Succs.count(MBB)) { report("Inconsistent CFG", MBB); - errs() << "MBB is not in the successor list of the predecessor BB#" - << (*I)->getNumber() << ".\n"; + errs() << "MBB is not in the successor list of the predecessor " + << printMBBReference(*(*I)) << ".\n"; } } @@ -1663,8 +1662,8 @@ void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) { for (MachineBasicBlock *Pred : MBB.predecessors()) { if (!seen.count(Pred)) { report("Missing PHI operand", &Phi); - errs() << "BB#" << Pred->getNumber() - << " is a predecessor according to the CFG.\n"; + errs() << printMBBReference(*Pred) + << " is a predecessor according to the CFG.\n"; } } } @@ -2038,8 +2037,8 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR, report("Register not marked live out of predecessor", *PI); report_context(LR, Reg, LaneMask); report_context(*VNI); - errs() << " live into BB#" << MFI->getNumber() - << '@' << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " + errs() << " live into " << printMBBReference(*MFI) << '@' + << LiveInts->getMBBStartIdx(&*MFI) << ", not live before " << PEnd << '\n'; continue; } @@ -2048,9 +2047,9 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR, if (!IsPHI && PVNI != VNI) { report("Different value live out of predecessor", *PI); report_context(LR, Reg, LaneMask); - errs() << "Valno #" << PVNI->id << " live out of BB#" - << (*PI)->getNumber() << '@' << PEnd << "\nValno #" << VNI->id - << " live into BB#" << MFI->getNumber() << '@' + errs() << "Valno #" << PVNI->id << " live out of " + << printMBBReference(*(*PI)) << '@' << PEnd << "\nValno #" + << VNI->id << " live into " 
<< printMBBReference(*MFI) << '@' << LiveInts->getMBBStartIdx(&*MFI) << '\n'; } } @@ -2201,11 +2200,11 @@ void MachineVerifier::verifyStackFrame() { (SPState[(*I)->getNumber()].ExitValue != BBState.EntryValue || SPState[(*I)->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) { report("The exit stack state of a predecessor is inconsistent.", MBB); - errs() << "Predecessor BB#" << (*I)->getNumber() << " has exit state (" - << SPState[(*I)->getNumber()].ExitValue << ", " - << SPState[(*I)->getNumber()].ExitIsSetup - << "), while BB#" << MBB->getNumber() << " has entry state (" - << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; + errs() << "Predecessor " << printMBBReference(*(*I)) + << " has exit state (" << SPState[(*I)->getNumber()].ExitValue + << ", " << SPState[(*I)->getNumber()].ExitIsSetup << "), while " + << printMBBReference(*MBB) << " has entry state (" + << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n"; } } @@ -2217,11 +2216,11 @@ void MachineVerifier::verifyStackFrame() { (SPState[(*I)->getNumber()].EntryValue != BBState.ExitValue || SPState[(*I)->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) { report("The entry stack state of a successor is inconsistent.", MBB); - errs() << "Successor BB#" << (*I)->getNumber() << " has entry state (" - << SPState[(*I)->getNumber()].EntryValue << ", " - << SPState[(*I)->getNumber()].EntryIsSetup - << "), while BB#" << MBB->getNumber() << " has exit state (" - << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; + errs() << "Successor " << printMBBReference(*(*I)) + << " has entry state (" << SPState[(*I)->getNumber()].EntryValue + << ", " << SPState[(*I)->getNumber()].EntryIsSetup << "), while " + << printMBBReference(*MBB) << " has exit state (" + << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n"; } } diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp index 864d6d547ca..82bbe1528c8 100644 --- a/lib/CodeGen/PHIElimination.cpp +++ b/lib/CodeGen/PHIElimination.cpp @@ -593,9 +593,9 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF, if (!ShouldSplit && !NoPhiElimLiveOutEarlyExit) continue; if (ShouldSplit) { - DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge BB#" - << PreMBB->getNumber() << " -> BB#" << MBB.getNumber() - << ": " << *BBI); + DEBUG(dbgs() << printReg(Reg) << " live-out before critical edge " + << printMBBReference(*PreMBB) << " -> " + << printMBBReference(MBB) << ": " << *BBI); } // If Reg is not live-in to MBB, it means it must be live-in to some diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp index 673dc37904f..81dd6ef2444 100644 --- a/lib/CodeGen/PostRASchedulerList.cpp +++ b/lib/CodeGen/PostRASchedulerList.cpp @@ -322,8 +322,8 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { static int bbcnt = 0; if (bbcnt++ % DebugDiv != DebugMod) continue; - dbgs() << "*** DEBUG scheduling " << Fn.getName() - << ":BB#" << MBB.getNumber() << " ***\n"; + dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":" + << printMBBReference(MBB) << " ***\n"; } #endif diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp index 7fbf7ddde0b..48b48c5f649 100644 --- a/lib/CodeGen/ProcessImplicitDefs.cpp +++ b/lib/CodeGen/ProcessImplicitDefs.cpp @@ -154,7 +154,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) { if (WorkList.empty()) continue; - DEBUG(dbgs() << "BB#" << MFI->getNumber() << " has " << WorkList.size() + DEBUG(dbgs() << 
printMBBReference(*MFI) << " has " << WorkList.size() << " implicit defs.\n"); Changed = true; diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp index 131cd5a17ef..7aa998bc07f 100644 --- a/lib/CodeGen/RegAllocGreedy.cpp +++ b/lib/CodeGen/RegAllocGreedy.cpp @@ -1612,7 +1612,7 @@ void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit, // Create separate intervals for isolated blocks with multiple uses. if (!IntvIn && !IntvOut) { - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n"); if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) SE->splitSingleBlock(BI); continue; diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index 2bbefa97132..09875d336fd 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -991,8 +991,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP, // Now ok to move copy. if (CopyLeftBB) { - DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to BB#" - << CopyLeftBB->getNumber() << '\t' << CopyMI); + DEBUG(dbgs() << "\tremovePartialRedundancy: Move the copy to " + << printMBBReference(*CopyLeftBB) << '\t' << CopyMI); // Insert new copy to CopyLeftBB. auto InsPos = CopyLeftBB->getFirstTerminator(); @@ -1010,8 +1010,8 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP, // the deleted list. ErasedInstrs.erase(NewCopyMI); } else { - DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from BB#" - << MBB.getNumber() << '\t' << CopyMI); + DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from " + << printMBBReference(MBB) << '\t' << CopyMI); } // Remove CopyMI. @@ -2376,7 +2376,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) { if (OtherV.ErasableImplicitDef && DefMI && DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) { DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def - << " extends into BB#" << DefMI->getParent()->getNumber() + << " extends into " << printMBBReference(*DefMI->getParent()) << ", keeping it.\n"); OtherV.ErasableImplicitDef = false; } diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index b1a48514910..4b45edcf0de 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -1043,7 +1043,7 @@ static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs, } void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) { - DEBUG(dbgs() << "Fixup kills for BB#" << MBB.getNumber() << '\n'); + DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n'); LiveRegs.init(*TRI); LiveRegs.addLiveOuts(MBB); diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index a83f4eff383..49f304c8cc8 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -346,9 +346,8 @@ static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos, /// Schedule - Schedule the DAG using list scheduling. 
void ScheduleDAGRRList::Schedule() { - DEBUG(dbgs() - << "********** List Scheduling BB#" << BB->getNumber() - << " '" << BB->getName() << "' **********\n"); + DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB) + << " '" << BB->getName() << "' **********\n"); CurCycle = 0; IssueCount = 0; diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp index 54c1531a018..07b46b9183a 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp @@ -93,9 +93,8 @@ private: /// Schedule - Schedule the DAG using list scheduling. void ScheduleDAGVLIW::Schedule() { - DEBUG(dbgs() - << "********** List Scheduling BB#" << BB->getNumber() - << " '" << BB->getName() << "' **********\n"); + DEBUG(dbgs() << "********** List Scheduling " << printMBBReference(*BB) + << " '" << BB->getName() << "' **********\n"); // Build the scheduling graph. BuildSchedGraph(AA); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index cb37137d547..8f47c5b40ba 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -730,8 +730,9 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { BlockName = (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str(); } - DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Initial selection DAG: " << printMBBReference(*FuncInfo->MBB) + << " '" << BlockName << "'\n"; + CurDAG->dump()); if (ViewDAGCombine1 && MatchFilterBB) CurDAG->viewGraph("dag-combine1 input for " + BlockName); @@ -743,8 +744,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->Combine(BeforeLegalizeTypes, AA, OptLevel); } - DEBUG(dbgs() << "Optimized lowered selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized lowered selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); // Second step, hack on the DAG until it only uses operations and types that // the target supports. @@ -758,8 +761,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { Changed = CurDAG->LegalizeTypes(); } - DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); // Only allow creation of legal node types. 
CurDAG->NewNodesMustHaveLegalTypes = true; @@ -775,8 +780,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->Combine(AfterLegalizeTypes, AA, OptLevel); } - DEBUG(dbgs() << "Optimized type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); } { @@ -786,8 +793,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { } if (Changed) { - DEBUG(dbgs() << "Vector-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Vector-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); { NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName, @@ -795,8 +804,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->LegalizeTypes(); } - DEBUG(dbgs() << "Vector/type-legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Vector/type-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewDAGCombineLT && MatchFilterBB) CurDAG->viewGraph("dag-combine-lv input for " + BlockName); @@ -808,8 +819,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->Combine(AfterLegalizeVectorOps, AA, OptLevel); } - DEBUG(dbgs() << "Optimized vector-legalized selection DAG: BB#" - << BlockNumber << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized vector-legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); } if (ViewLegalizeDAGs && MatchFilterBB) @@ -821,8 +834,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->Legalize(); } - DEBUG(dbgs() << "Legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewDAGCombine2 && MatchFilterBB) CurDAG->viewGraph("dag-combine2 input for " + BlockName); @@ -834,8 +849,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { CurDAG->Combine(AfterLegalizeDAG, AA, OptLevel); } - DEBUG(dbgs() << "Optimized legalized selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Optimized legalized selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (OptLevel != CodeGenOpt::None) ComputeLiveOutVRegInfo(); @@ -851,8 +868,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { DoInstructionSelection(); } - DEBUG(dbgs() << "Selected selection DAG: BB#" << BlockNumber - << " '" << BlockName << "'\n"; CurDAG->dump()); + DEBUG(dbgs() << "Selected selection DAG: " + << printMBBReference(*FuncInfo->MBB) << " '" << BlockName + << "'\n"; + CurDAG->dump()); if (ViewSchedDAGs && MatchFilterBB) CurDAG->viewGraph("scheduler input for " + BlockName); @@ -919,9 +938,9 @@ public: } // end anonymous namespace void SelectionDAGISel::DoInstructionSelection() { - DEBUG(dbgs() << "===== Instruction selection begins: BB#" - << FuncInfo->MBB->getNumber() - << " '" << FuncInfo->MBB->getName() << "'\n"); + DEBUG(dbgs() << "===== Instruction selection begins: " + << printMBBReference(*FuncInfo->MBB) << " '" + << FuncInfo->MBB->getName() << "'\n"); PreprocessISelDAG(); diff --git 
a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp index 25a1c37b145..184329a659c 100644 --- a/lib/CodeGen/SlotIndexes.cpp +++ b/lib/CodeGen/SlotIndexes.cpp @@ -264,7 +264,7 @@ LLVM_DUMP_METHOD void SlotIndexes::dump() const { } for (unsigned i = 0, e = MBBRanges.size(); i != e; ++i) - dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';' + dbgs() << "%bb." << i << "\t[" << MBBRanges[i].first << ';' << MBBRanges[i].second << ")\n"; } #endif diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp index 49f31333acf..fc85ea3d166 100644 --- a/lib/CodeGen/SplitKit.cpp +++ b/lib/CodeGen/SplitKit.cpp @@ -729,7 +729,8 @@ SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) { assert(OpenIdx && "openIntv not called before enterIntvAtEnd"); SlotIndex End = LIS.getMBBEndIdx(&MBB); SlotIndex Last = End.getPrevSlot(); - DEBUG(dbgs() << " enterIntvAtEnd BB#" << MBB.getNumber() << ", " << Last); + DEBUG(dbgs() << " enterIntvAtEnd " << printMBBReference(MBB) << ", " + << Last); VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Last); if (!ParentVNI) { DEBUG(dbgs() << ": not live\n"); @@ -808,7 +809,8 @@ SlotIndex SplitEditor::leaveIntvBefore(SlotIndex Idx) { SlotIndex SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) { assert(OpenIdx && "openIntv not called before leaveIntvAtTop"); SlotIndex Start = LIS.getMBBStartIdx(&MBB); - DEBUG(dbgs() << " leaveIntvAtTop BB#" << MBB.getNumber() << ", " << Start); + DEBUG(dbgs() << " leaveIntvAtTop " << printMBBReference(MBB) << ", " + << Start); VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Start); if (!ParentVNI) { @@ -906,15 +908,15 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB, // MBB isn't in a loop, it doesn't get any better. All dominators have a // higher frequency by definition. if (!Loop) { - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " at depth 0\n"); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " at depth 0\n"); return MBB; } // We'll never be able to exit the DefLoop. if (Loop == DefLoop) { - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " in the same loop\n"); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " in the same loop\n"); return MBB; } @@ -923,8 +925,8 @@ SplitEditor::findShallowDominator(MachineBasicBlock *MBB, if (Depth < BestDepth) { BestMBB = MBB; BestDepth = Depth; - DEBUG(dbgs() << "Def in BB#" << DefMBB->getNumber() << " dominates BB#" - << MBB->getNumber() << " at depth " << Depth << '\n'); + DEBUG(dbgs() << "Def in " << printMBBReference(*DefMBB) << " dominates " + << printMBBReference(*MBB) << " at depth " << Depth << '\n'); } // Leave loop by going to the immediate dominator of the loop header. 
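
[Note] The SlotIndexes::dump hunk above keeps a raw "%bb." << i spelling because only a block *number* is in scope at that point. Where the MachineFunction is available, the block can be recovered first and the helper used instead (the same lookup the HexagonConstPropagation hunk later in this patch relies on). A sketch, with the function name invented:

    // Sketch: two equivalent spellings when only a block number N is in scope.
    // The raw form matches what SlotIndexes::dump() prints above.
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    static void printBlockByNumber(llvm::MachineFunction &MF, unsigned N) {
      llvm::dbgs() << "%bb." << N << " == "
                   << llvm::printMBBReference(*MF.getBlockNumbered(N)) << '\n';
    }
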
@@ -1063,7 +1065,7 @@ void SplitEditor::hoistCopies() { DEBUG(dbgs() << "Multi-mapped complement " << VNI->id << '@' << VNI->def << " for parent " << ParentVNI->id << '@' << ParentVNI->def - << " hoist to BB#" << Dom.first->getNumber() << ' ' + << " hoist to " << printMBBReference(*Dom.first) << ' ' << Dom.second << '\n'); } @@ -1173,7 +1175,7 @@ bool SplitEditor::transferValues() { if (Start != BlockStart) { VNInfo *VNI = LI.extendInBlock(BlockStart, std::min(BlockEnd, End)); assert(VNI && "Missing def for complex mapped value"); - DEBUG(dbgs() << ':' << VNI->id << "*BB#" << MBB->getNumber()); + DEBUG(dbgs() << ':' << VNI->id << "*" << printMBBReference(*MBB)); // MBB has its own def. Is it also live-out? if (BlockEnd <= End) LRC.setLiveOutValue(&*MBB, VNI); @@ -1186,7 +1188,7 @@ bool SplitEditor::transferValues() { // Handle the live-in blocks covered by [Start;End). assert(Start <= BlockStart && "Expected live-in block"); while (BlockStart < End) { - DEBUG(dbgs() << ">BB#" << MBB->getNumber()); + DEBUG(dbgs() << ">" << printMBBReference(*MBB)); BlockEnd = LIS.getMBBEndIdx(&*MBB); if (BlockStart == ParentVNI->def) { // This block has the def of a parent PHI, so it isn't live-in. @@ -1329,7 +1331,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) { unsigned RegIdx = RegAssign.lookup(Idx); LiveInterval &LI = LIS.getInterval(Edit->get(RegIdx)); MO.setReg(LI.reg); - DEBUG(dbgs() << " rewr BB#" << MI->getParent()->getNumber() << '\t' + DEBUG(dbgs() << " rewr " << printMBBReference(*MI->getParent()) << '\t' << Idx << ':' << RegIdx << '\t' << *MI); // Extend liveness to Idx if the instruction reads reg. @@ -1563,9 +1565,9 @@ void SplitEditor::splitLiveThroughBlock(unsigned MBBNum, SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(MBBNum); - DEBUG(dbgs() << "BB#" << MBBNum << " [" << Start << ';' << Stop - << ") intf " << LeaveBefore << '-' << EnterAfter - << ", live-through " << IntvIn << " -> " << IntvOut); + DEBUG(dbgs() << "%bb." << MBBNum << " [" << Start << ';' << Stop << ") intf " + << LeaveBefore << '-' << EnterAfter << ", live-through " + << IntvIn << " -> " << IntvOut); assert((IntvIn || IntvOut) && "Use splitSingleBlock for isolated blocks"); @@ -1665,7 +1667,7 @@ void SplitEditor::splitRegInBlock(const SplitAnalysis::BlockInfo &BI, SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB); - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop << "), uses " << BI.FirstInstr << '-' << BI.LastInstr << ", reg-in " << IntvIn << ", leave before " << LeaveBefore << (BI.LiveOut ? ", stack-out" : ", killed in block")); @@ -1757,7 +1759,7 @@ void SplitEditor::splitRegOutBlock(const SplitAnalysis::BlockInfo &BI, SlotIndex Start, Stop; std::tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB); - DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " [" << Start << ';' << Stop + DEBUG(dbgs() << printMBBReference(*BI.MBB) << " [" << Start << ';' << Stop << "), uses " << BI.FirstInstr << '-' << BI.LastInstr << ", reg-out " << IntvOut << ", enter after " << EnterAfter << (BI.LiveIn ? 
", stack-in" : ", defined in block")); diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp index 0a7be1d12fa..18bba42907b 100644 --- a/lib/CodeGen/StackColoring.cpp +++ b/lib/CodeGen/StackColoring.cpp @@ -739,7 +739,7 @@ unsigned StackColoring::collectMarkers(unsigned NumSlot) { } else { for (auto Slot : slots) { DEBUG(dbgs() << "Found a use of slot #" << Slot); - DEBUG(dbgs() << " at BB#" << MBB->getNumber() << " index "); + DEBUG(dbgs() << " at " << printMBBReference(*MBB) << " index "); DEBUG(Indexes->getInstructionIndex(MI).print(dbgs())); const AllocaInst *Allocation = MFI->getObjectAllocation(Slot); if (Allocation) { diff --git a/lib/CodeGen/TailDuplicator.cpp b/lib/CodeGen/TailDuplicator.cpp index 7adf9b037b5..63eb6cc651b 100644 --- a/lib/CodeGen/TailDuplicator.cpp +++ b/lib/CodeGen/TailDuplicator.cpp @@ -111,9 +111,10 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) { } } if (!Found) { - dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI; - dbgs() << " missing input from predecessor BB#" - << PredBB->getNumber() << '\n'; + dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": " + << *MI; + dbgs() << " missing input from predecessor " + << printMBBReference(*PredBB) << '\n'; llvm_unreachable(nullptr); } } @@ -121,15 +122,16 @@ static void VerifyPHIs(MachineFunction &MF, bool CheckExtra) { for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) { MachineBasicBlock *PHIBB = MI->getOperand(i + 1).getMBB(); if (CheckExtra && !Preds.count(PHIBB)) { - dbgs() << "Warning: malformed PHI in BB#" << MBB->getNumber() << ": " - << *MI; - dbgs() << " extra input from predecessor BB#" << PHIBB->getNumber() - << '\n'; + dbgs() << "Warning: malformed PHI in " << printMBBReference(*MBB) + << ": " << *MI; + dbgs() << " extra input from predecessor " + << printMBBReference(*PHIBB) << '\n'; llvm_unreachable(nullptr); } if (PHIBB->getNumber() < 0) { - dbgs() << "Malformed PHI in BB#" << MBB->getNumber() << ": " << *MI; - dbgs() << " non-existing BB#" << PHIBB->getNumber() << '\n'; + dbgs() << "Malformed PHI in " << printMBBReference(*MBB) << ": " + << *MI; + dbgs() << " non-existing " << printMBBReference(*PHIBB) << '\n'; llvm_unreachable(nullptr); } } @@ -783,7 +785,8 @@ bool TailDuplicator::tailDuplicate(bool IsSimple, MachineBasicBlock *TailBB, MachineBasicBlock *ForcedLayoutPred, SmallVectorImpl &TDBBs, SmallVectorImpl &Copies) { - DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n'); + DEBUG(dbgs() << "\n*** Tail-duplicating " << printMBBReference(*TailBB) + << '\n'); DenseSet UsedByPhi; getRegsUsedByPHIs(*TailBB, &UsedByPhi); diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp index d1bcd3dcaec..f765825cdee 100644 --- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp +++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp @@ -207,7 +207,7 @@ MachineInstr *AArch64ConditionOptimizer::findSuitableCompare( return nullptr; } } - DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); return nullptr; } diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp index 668d21d0b16..f7c97117ba5 100644 --- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -369,7 +369,7 @@ MachineInstr *SSACCmpConv::findConvertibleCompare(MachineBasicBlock *MBB) { 
return nullptr; } } - DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n'); + DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB) << '\n'); return nullptr; } @@ -383,7 +383,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, // Reject any live-in physregs. It's probably NZCV/EFLAGS, and very hard to // get right. if (!MBB->livein_empty()) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has live-ins.\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has live-ins.\n"); return false; } @@ -396,7 +396,7 @@ bool SSACCmpConv::canSpeculateInstrs(MachineBasicBlock *MBB, continue; if (++InstrCount > BlockInstrLimit && !Stress) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " has more than " + DEBUG(dbgs() << printMBBReference(*MBB) << " has more than " << BlockInstrLimit << " instructions.\n"); return false; } @@ -458,8 +458,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { return false; // The CFG topology checks out. - DEBUG(dbgs() << "\nTriangle: BB#" << Head->getNumber() << " -> BB#" - << CmpBB->getNumber() << " -> BB#" << Tail->getNumber() << '\n'); + DEBUG(dbgs() << "\nTriangle: " << printMBBReference(*Head) << " -> " + << printMBBReference(*CmpBB) << " -> " + << printMBBReference(*Tail) << '\n'); ++NumConsidered; // Tail is allowed to have many predecessors, but we can't handle PHIs yet. @@ -562,8 +563,9 @@ bool SSACCmpConv::canConvert(MachineBasicBlock *MBB) { } void SSACCmpConv::convert(SmallVectorImpl &RemovedBlocks) { - DEBUG(dbgs() << "Merging BB#" << CmpBB->getNumber() << " into BB#" - << Head->getNumber() << ":\n" << *CmpBB); + DEBUG(dbgs() << "Merging " << printMBBReference(*CmpBB) << " into " + << printMBBReference(*Head) << ":\n" + << *CmpBB); // All CmpBB instructions are moved into Head, and CmpBB is deleted. // Update the CFG first. diff --git a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp index ec98980fa0b..98480835376 100644 --- a/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ b/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -12,9 +12,9 @@ // 1. For BBs that are targets of CBZ/CBNZ instructions, we know the value of // the CBZ/CBNZ source register is zero on the taken/not-taken path. For // instance, the copy instruction in the code below can be removed because -// the CBZW jumps to BB#2 when w0 is zero. +// the CBZW jumps to %bb.2 when w0 is zero. // -// BB#1: +// %bb.1: // cbz w0, .LBB0_2 // .LBB0_2: // mov w0, wzr ; <-- redundant @@ -22,11 +22,11 @@ // 2. If the flag setting instruction defines a register other than WZR/XZR, we // can remove a zero copy in some cases. // -// BB#0: +// %bb.0: // subs w0, w1, w2 // str w0, [x1] // b.ne .LBB0_2 -// BB#1: +// %bb.1: // mov w0, wzr ; <-- redundant // str w0, [x2] // .LBB0_2 @@ -35,7 +35,7 @@ // constant (i.e., ADDS[W|X]ri, SUBS[W|X]ri), we can remove a mov immediate // in some cases. 
// -// BB#0: +// %bb.0: // subs xzr, x0, #1 // b.eq .LBB0_1 // .LBB0_1: diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp index 879f65e1228..5ff82c5d1e0 100644 --- a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp +++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp @@ -270,8 +270,8 @@ LLVM_DUMP_METHOD void PHILinearize::dump(MachineRegisterInfo *MRI) { dbgs() << "Dest: " << printReg(Element.DestReg, TRI) << " Sources: {"; for (auto &SI : Element.Sources) { - dbgs() << printReg(SI.first, TRI) << "(BB#" - << SI.second->getNumber() << "),"; + dbgs() << printReg(SI.first, TRI) << '(' << printMBBReference(*SI.second) + << "),"; } dbgs() << "}\n"; } @@ -658,7 +658,7 @@ RegionMRT *MRT::buildMRT(MachineFunction &MF, continue; } - DEBUG(dbgs() << "Visiting BB#" << MBB->getNumber() << "\n"); + DEBUG(dbgs() << "Visiting " << printMBBReference(*MBB) << "\n"); MBBMRT *NewMBB = new MBBMRT(MBB); MachineRegion *Region = RegionInfo->getRegionFor(MBB); @@ -705,7 +705,7 @@ void LinearizedRegion::storeLiveOutReg(MachineBasicBlock *MBB, unsigned Reg, // If this is live out of the MBB for (auto &UI : MRI->use_operands(Reg)) { if (UI.getParent()->getParent() != MBB) { - DEBUG(dbgs() << "Add LiveOut (MBB BB#" << MBB->getNumber() + DEBUG(dbgs() << "Add LiveOut (MBB " << printMBBReference(*MBB) << "): " << printReg(Reg, TRI) << "\n"); addLiveOut(Reg); } else { @@ -749,7 +749,8 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB, const MachineRegisterInfo *MRI, const TargetRegisterInfo *TRI, PHILinearize &PHIInfo) { - DEBUG(dbgs() << "-Store Live Outs Begin (BB#" << MBB->getNumber() << ")-\n"); + DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB) + << ")-\n"); for (auto &II : *MBB) { for (auto &RI : II.defs()) { storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo); @@ -773,8 +774,8 @@ void LinearizedRegion::storeLiveOuts(MachineBasicBlock *MBB, for (int i = 0; i < numPreds; ++i) { if (getPHIPred(PHI, i) == MBB) { unsigned PHIReg = getPHISourceReg(PHI, i); - DEBUG(dbgs() << "Add LiveOut (PhiSource BB#" << MBB->getNumber() - << " -> BB#" << (*SI)->getNumber() + DEBUG(dbgs() << "Add LiveOut (PhiSource " << printMBBReference(*MBB) + << " -> " << printMBBReference(*(*SI)) << "): " << printReg(PHIReg, TRI) << "\n"); addLiveOut(PHIReg); } @@ -1480,8 +1481,8 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI, if (SourceMBB) { MIB.addReg(CombinedSourceReg); MIB.addMBB(SourceMBB); - DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#" - << SourceMBB->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*SourceMBB)); } for (unsigned i = 0; i < NumInputs; ++i) { @@ -1492,8 +1493,8 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI, MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); } @@ -1524,8 +1525,8 @@ void AMDGPUMachineCFGStructurizer::replacePHI( getPHIDestReg(PHI)); MIB.addReg(CombinedSourceReg); MIB.addMBB(LastMerge); - DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#" - << LastMerge->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*LastMerge)); for (unsigned i = 0; i < NumInputs; ++i) { if 
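
[Note] The AMDGPU structurizer hunks below stream printReg and printMBBReference side by side; both return llvm::Printable, so they compose freely in a single statement. A sketch with invented names; the TargetRegisterInfo header path is assumed to be the one in this tree at the time of the patch:

    // Sketch: composing the two helpers in one dump statement, as the
    // PHILinearize and structurizer hunks below do.
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Target/TargetRegisterInfo.h" // header path as of this revision

    static void dumpPhiSource(unsigned Reg, const llvm::MachineBasicBlock &Pred,
                              const llvm::TargetRegisterInfo *TRI) {
      llvm::dbgs() << llvm::printReg(Reg, TRI) << '('
                   << llvm::printMBBReference(Pred) << ")\n";
    }
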
(isPHIRegionIndex(PHIRegionIndices, i)) { continue; @@ -1534,8 +1535,8 @@ void AMDGPUMachineCFGStructurizer::replacePHI( MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); } else { @@ -1572,8 +1573,8 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI( getPHIDestReg(PHI)); MIB.addReg(CombinedSourceReg); MIB.addMBB(IfMBB); - DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", BB#" - << IfMBB->getNumber()); + DEBUG(dbgs() << printReg(CombinedSourceReg, TRI) << ", " + << printMBBReference(*IfMBB)); unsigned NumInputs = getPHINumInputs(PHI); for (unsigned i = 0; i < NumInputs; ++i) { if (isPHIRegionIndex(PHIRegionIndices, i)) { @@ -1583,8 +1584,8 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI( MachineBasicBlock *SourcePred = getPHIPred(PHI, i); MIB.addReg(SourceReg); MIB.addMBB(SourcePred); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << SourcePred->getNumber()); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*SourcePred)); } DEBUG(dbgs() << ")\n"); PHI.eraseFromParent(); @@ -1749,11 +1750,11 @@ void AMDGPUMachineCFGStructurizer::insertMergePHI(MachineBasicBlock *IfBB, if (MergeBB->succ_begin() == MergeBB->succ_end()) { return; } - DEBUG(dbgs() << "Merge PHI (BB#" << MergeBB->getNumber() + DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB) << "): " << printReg(DestRegister, TRI) << " = PHI(" - << printReg(IfSourceRegister, TRI) << ", BB#" - << IfBB->getNumber() << printReg(CodeSourceRegister, TRI) - << ", BB#" << CodeBB->getNumber() << ")\n"); + << printReg(IfSourceRegister, TRI) << ", " + << printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI) + << ", " << printMBBReference(*CodeBB) << ")\n"); const DebugLoc &DL = MergeBB->findDebugLoc(MergeBB->begin()); MachineInstrBuilder MIB = BuildMI(*MergeBB, MergeBB->instr_begin(), DL, TII->get(TargetOpcode::PHI), DestRegister); @@ -1811,8 +1812,8 @@ static void removeExternalCFGEdges(MachineBasicBlock *StartMBB, for (auto SI : Succs) { std::pair Edge = SI; - DEBUG(dbgs() << "Removing edge: BB#" << Edge.first->getNumber() << " -> BB#" - << Edge.second->getNumber() << "\n"); + DEBUG(dbgs() << "Removing edge: " << printMBBReference(*Edge.first) + << " -> " << printMBBReference(*Edge.second) << "\n"); Edge.first->removeSuccessor(Edge.second); } } @@ -1850,8 +1851,8 @@ MachineBasicBlock *AMDGPUMachineCFGStructurizer::createIfBlock( if (!CodeBBEnd->isSuccessor(MergeBB)) CodeBBEnd->addSuccessor(MergeBB); - DEBUG(dbgs() << "Moved MBB#" << CodeBBStart->getNumber() << " through MBB#" - << CodeBBEnd->getNumber() << "\n"); + DEBUG(dbgs() << "Moved " << printMBBReference(*CodeBBStart) << " through " + << printMBBReference(*CodeBBEnd) << "\n"); // If we have a single predecessor we can find a reasonable debug location MachineBasicBlock *SinglePred = @@ -2064,7 +2065,7 @@ void AMDGPUMachineCFGStructurizer::rewriteLiveOutRegs(MachineBasicBlock *IfBB, // is a source block for a definition. 
SmallVector Sources; if (PHIInfo.findSourcesFromMBB(CodeBB, Sources)) { - DEBUG(dbgs() << "Inserting PHI Live Out from BB#" << CodeBB->getNumber() + DEBUG(dbgs() << "Inserting PHI Live Out from " << printMBBReference(*CodeBB) << "\n"); for (auto SI : Sources) { unsigned DestReg; @@ -2172,16 +2173,17 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio CurrentBackedgeReg = NewBackedgeReg; DEBUG(dbgs() << "Inserting backedge PHI: " << printReg(NewBackedgeReg, TRI) << " = PHI(" - << printReg(CurrentBackedgeReg, TRI) << ", BB#" - << getPHIPred(*PHIDefInstr, 0)->getNumber() << ", " + << printReg(CurrentBackedgeReg, TRI) << ", " + << printMBBReference(*getPHIPred(*PHIDefInstr, 0)) + << ", " << printReg(getPHISourceReg(*PHIDefInstr, 1), TRI) - << ", BB#" << (*SRI).second->getNumber()); + << ", " << printMBBReference(*(*SRI).second)); } } else { MIB.addReg(SourceReg); MIB.addMBB((*SRI).second); - DEBUG(dbgs() << printReg(SourceReg, TRI) << ", BB#" - << (*SRI).second->getNumber() << ", "); + DEBUG(dbgs() << printReg(SourceReg, TRI) << ", " + << printMBBReference(*(*SRI).second) << ", "); } } @@ -2189,8 +2191,8 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio if (CurrentBackedgeReg != 0) { MIB.addReg(CurrentBackedgeReg); MIB.addMBB(Exit); - DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", BB#" - << Exit->getNumber() << ")\n"); + DEBUG(dbgs() << printReg(CurrentBackedgeReg, TRI) << ", " + << printMBBReference(*Exit) << ")\n"); } else { DEBUG(dbgs() << ")\n"); } @@ -2443,11 +2445,12 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI, << " = PHI("); MIB.addReg(PHISource); MIB.addMBB(Entry); - DEBUG(dbgs() << printReg(PHISource, TRI) << ", BB#" << Entry->getNumber()); + DEBUG(dbgs() << printReg(PHISource, TRI) << ", " + << printMBBReference(*Entry)); MIB.addReg(RegionSourceReg); MIB.addMBB(RegionSourceMBB); - DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", BB#" - << RegionSourceMBB->getNumber() << ")\n"); + DEBUG(dbgs() << " ," << printReg(RegionSourceReg, TRI) << ", " + << printMBBReference(*RegionSourceMBB) << ")\n"); } void AMDGPUMachineCFGStructurizer::splitLoopPHIs(MachineBasicBlock *Entry, @@ -2528,9 +2531,9 @@ AMDGPUMachineCFGStructurizer::splitEntry(LinearizedRegion *LRegion) { MachineBasicBlock *EntrySucc = split(Entry->getFirstNonPHI()); MachineBasicBlock *Exit = LRegion->getExit(); - DEBUG(dbgs() << "Split BB#" << Entry->getNumber() << " to BB#" - << Entry->getNumber() << " -> BB#" << EntrySucc->getNumber() - << "\n"); + DEBUG(dbgs() << "Split " << printMBBReference(*Entry) << " to " + << printMBBReference(*Entry) << " -> " + << printMBBReference(*EntrySucc) << "\n"); LRegion->addMBB(EntrySucc); // Make the backedge go to Entry Succ diff --git a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp index 942063d5f93..56d639aca52 100644 --- a/lib/Target/AMDGPU/GCNIterativeScheduler.cpp +++ b/lib/Target/AMDGPU/GCNIterativeScheduler.cpp @@ -63,8 +63,8 @@ static void printRegion(raw_ostream &OS, unsigned MaxInstNum = std::numeric_limits::max()) { auto BB = Begin->getParent(); - OS << BB->getParent()->getName() << ":BB#" << BB->getNumber() - << ' ' << BB->getName() << ":\n"; + OS << BB->getParent()->getName() << ":" << printMBBReference(*BB) << ' ' + << BB->getName() << ":\n"; auto I = Begin; MaxInstNum = std::max(MaxInstNum, 1u); for (; I != End && MaxInstNum; ++I, --MaxInstNum) { diff --git a/lib/Target/AMDGPU/GCNSchedStrategy.cpp 
b/lib/Target/AMDGPU/GCNSchedStrategy.cpp index 155b400ba02..38803204d6e 100644 --- a/lib/Target/AMDGPU/GCNSchedStrategy.cpp +++ b/lib/Target/AMDGPU/GCNSchedStrategy.cpp @@ -531,9 +531,8 @@ void GCNScheduleDAGMILive::finalizeSchedule() { } DEBUG(dbgs() << "********** MI Scheduling **********\n"); - DEBUG(dbgs() << MF.getName() - << ":BB#" << MBB->getNumber() << " " << MBB->getName() - << "\n From: " << *begin() << " To: "; + DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " " + << MBB->getName() << "\n From: " << *begin() << " To: "; if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; else dbgs() << "End"; dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n'); diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp index d6b99966760..8b155c2d278 100644 --- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -22,7 +22,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %1 , , %3 , +/// %4 = PHI %1 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// @@ -37,7 +37,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %0 , , %3 , +/// %4 = PHI %0 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// Now that the result of the PHI instruction is an SGPR, the register @@ -52,7 +52,7 @@ /// %2 = VECTOR_INST /// %3 = COPY %2 /// BB2: -/// %4 = PHI %0 , , %3 , +/// %4 = PHI %0 , <%bb.0>, %3 , <%bb.1> /// %5 = VECTOR_INST %4 /// /// Now this code contains an illegal copy from a VGPR to an SGPR. @@ -515,8 +515,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg, if (MDT.dominates(MI1, MI2)) { if (!intereferes(MI2, MI1)) { - DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber() - << " " << *MI2); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI2->getParent()) << " " + << *MI2); MI2->eraseFromParent(); Defs.erase(I2++); Changed = true; @@ -524,8 +525,9 @@ static bool hoistAndMergeSGPRInits(unsigned Reg, } } else if (MDT.dominates(MI2, MI1)) { if (!intereferes(MI1, MI2)) { - DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber() - << " " << *MI1); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI1->getParent()) << " " + << *MI1); MI1->eraseFromParent(); Defs.erase(I1++); Changed = true; @@ -541,10 +543,11 @@ static bool hoistAndMergeSGPRInits(unsigned Reg, MachineBasicBlock::iterator I = MBB->getFirstNonPHI(); if (!intereferes(MI1, I) && !intereferes(MI2, I)) { - DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber() - << " " << *MI1 << "and moving from BB#" - << MI2->getParent()->getNumber() << " to BB#" - << I->getParent()->getNumber() << " " << *MI2); + DEBUG(dbgs() << "Erasing from " + << printMBBReference(*MI1->getParent()) << " " << *MI1 + << "and moving from " + << printMBBReference(*MI2->getParent()) << " to " + << printMBBReference(*I->getParent()) << " " << *MI2); I->getParent()->splice(I, MI2->getParent(), MI2); MI1->eraseFromParent(); Defs.erase(I1++); diff --git a/lib/Target/AMDGPU/SIMachineScheduler.cpp b/lib/Target/AMDGPU/SIMachineScheduler.cpp index c13148bf0a2..3fb39998ff7 100644 --- a/lib/Target/AMDGPU/SIMachineScheduler.cpp +++ b/lib/Target/AMDGPU/SIMachineScheduler.cpp @@ -2050,9 +2050,9 @@ void SIScheduleDAGMI::schedule() placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; - dumpSchedule(); - dbgs() << '\n'; - }); + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; + 
dumpSchedule(); + dbgs() << '\n'; + }); } diff --git a/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/lib/Target/AMDGPU/SIWholeQuadMode.cpp index 18649733103..c46fd287106 100644 --- a/lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ b/lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -224,7 +224,8 @@ FunctionPass *llvm::createSIWholeQuadModePass() { #ifndef NDEBUG LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() { for (const auto &BII : Blocks) { - dbgs() << "\nBB#" << BII.first->getNumber() << ":\n" + dbgs() << "\n" + << printMBBReference(*BII.first) << ":\n" << " InNeeds = " << PrintState(BII.second.InNeeds) << ", Needs = " << PrintState(BII.second.Needs) << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n"; @@ -680,7 +681,7 @@ void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) return; - DEBUG(dbgs() << "\nProcessing block BB#" << MBB.getNumber() << ":\n"); + DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB) << ":\n"); unsigned SavedWQMReg = 0; unsigned SavedNonWWMReg = 0; diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index bc781b26b2c..8baee1ce281 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -326,7 +326,7 @@ LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() { DEBUG({ for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) { const BasicBlockInfo &BBI = BBInfo[J]; - dbgs() << format("%08x BB#%u\t", BBI.Offset, J) + dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) << " kb=" << unsigned(BBI.KnownBits) << " ua=" << unsigned(BBI.Unalign) << " pa=" << unsigned(BBI.PostAlign) @@ -1071,11 +1071,11 @@ bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset, const BasicBlockInfo &BBI = BBInfo[Block]; dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm() << " max delta=" << MaxDisp - << format(" insn address=%#x", UserOffset) - << " in BB#" << Block << ": " + << format(" insn address=%#x", UserOffset) << " in " + << printMBBReference(*MI->getParent()) << ": " << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI << format("CPE address=%#x offset=%+d: ", CPEOffset, - int(CPEOffset-UserOffset)); + int(CPEOffset - UserOffset)); }); } @@ -1261,7 +1261,7 @@ bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, // This is the least amount of required padding seen so far. BestGrowth = Growth; WaterIter = IP; - DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber() + DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB) << " Growth=" << Growth << '\n'); if (CloserWater && WaterBB == U.MI->getParent()) @@ -1305,8 +1305,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex, unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta; if (isOffsetInRange(UserOffset, CPEOffset, U)) { - DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() - << format(", expected CPE offset %#x\n", CPEOffset)); + DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB) + << format(", expected CPE offset %#x\n", CPEOffset)); NewMBB = &*++UserMBB->getIterator(); // Add an unconditional branch from UserMBB to fallthrough block. 
Record // it for branch lengthening; this new branch will not get out of range, @@ -1578,11 +1578,11 @@ bool ARMConstantIslands::isBBInRange(MachineInstr *MI,MachineBasicBlock *DestBB, unsigned BrOffset = getOffsetOf(MI) + PCAdj; unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; - DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber() - << " from BB#" << MI->getParent()->getNumber() - << " max delta=" << MaxDisp - << " from " << getOffsetOf(MI) << " to " << DestOffset - << " offset " << int(DestOffset-BrOffset) << "\t" << *MI); + DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB) + << " from " << printMBBReference(*MI->getParent()) + << " max delta=" << MaxDisp << " from " << getOffsetOf(MI) + << " to " << DestOffset << " offset " + << int(DestOffset - BrOffset) << "\t" << *MI); if (BrOffset <= DestOffset) { // Branch before the Dest. @@ -1700,9 +1700,9 @@ ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) { } MachineBasicBlock *NextBB = &*++MBB->getIterator(); - DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() - << " also invert condition and change dest. to BB#" - << NextBB->getNumber() << "\n"); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB) + << " also invert condition and change dest. to " + << printMBBReference(*NextBB) << "\n"); // Insert a new conditional branch and a new unconditional branch. // Also update the ImmBranch as well as adding a new entry for the new branch. @@ -2212,7 +2212,7 @@ bool ARMConstantIslands::optimizeThumb2JumpTables() { .addReg(IdxReg, getKillRegState(IdxRegKill)) .addJumpTableIndex(JTI, JTOP.getTargetFlags()) .addImm(CPEMI->getOperand(0).getImm()); - DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI); + DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI); unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH; CPEMI->setDesc(TII->get(JTOpc)); diff --git a/lib/Target/ARM/ARMConstantPoolValue.cpp b/lib/Target/ARM/ARMConstantPoolValue.cpp index 38ea835fbe2..39ae02af513 100644 --- a/lib/Target/ARM/ARMConstantPoolValue.cpp +++ b/lib/Target/ARM/ARMConstantPoolValue.cpp @@ -292,6 +292,6 @@ void ARMConstantPoolMBB::addSelectionDAGCSEId(FoldingSetNodeID &ID) { } void ARMConstantPoolMBB::print(raw_ostream &O) const { - O << "BB#" << MBB->getNumber(); + O << printMBBReference(*MBB); ARMConstantPoolValue::print(O); } diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp index 98cd0f165a6..283359c8b23 100644 --- a/lib/Target/BPF/BPFISelDAGToDAG.cpp +++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -573,10 +573,10 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node, return; } else { // The PHI node looks like: - // %2 = PHI %0, , %1, - // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3) - // The AND operation can be removed if both %0 in BB#1 and %1 in - // BB#3 are defined with with a load matching the MaskN. + // %2 = PHI %0, <%bb.1>, %1, <%bb.3> + // Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3) + // The AND operation can be removed if both %0 in %bb.1 and %1 in + // %bb.3 are defined with with a load matching the MaskN. 
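
[Note] The BPF comment above describes tracing each PHI incoming (value, block) pair. In MachineInstr form those pairs are alternating register/MBB operands after the def, which is also how the TailDuplicator and BPF loops in this patch walk them before printing the blocks in the new %bb.N form. A sketch with an invented helper name:

    // Sketch: visiting a machine PHI's incoming blocks. Operand 0 is the def;
    // then the value sits at operand i and its predecessor block at i + 1.
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    static void dumpPhiIncomingBlocks(const llvm::MachineInstr &PHI) {
      for (unsigned i = 1, e = PHI.getNumOperands(); i != e; i += 2)
        llvm::dbgs() << "  incoming from "
                     << llvm::printMBBReference(*PHI.getOperand(i + 1).getMBB())
                     << '\n';
    }
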
DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n'); unsigned PrevReg = -1; for (unsigned i = 0; i < MII->getNumOperands(); ++i) { diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp index 4a10408d8c7..a8b89990277 100644 --- a/lib/Target/Hexagon/BitTracker.cpp +++ b/lib/Target/Hexagon/BitTracker.cpp @@ -767,7 +767,7 @@ bool BT::MachineEvaluator::evaluate(const MachineInstr &MI, void BT::visitPHI(const MachineInstr &PI) { int ThisN = PI.getParent()->getNumber(); if (Trace) - dbgs() << "Visit FI(BB#" << ThisN << "): " << PI; + dbgs() << "Visit FI(" << printMBBReference(*PI.getParent()) << "): " << PI; const MachineOperand &MD = PI.getOperand(0); assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition"); @@ -784,7 +784,8 @@ void BT::visitPHI(const MachineInstr &PI) { const MachineBasicBlock *PB = PI.getOperand(i + 1).getMBB(); int PredN = PB->getNumber(); if (Trace) - dbgs() << " edge BB#" << PredN << "->BB#" << ThisN; + dbgs() << " edge " << printMBBReference(*PB) << "->" + << printMBBReference(*PI.getParent()); if (!EdgeExec.count(CFGEdge(PredN, ThisN))) { if (Trace) dbgs() << " not executable\n"; @@ -809,10 +810,8 @@ void BT::visitPHI(const MachineInstr &PI) { } void BT::visitNonBranch(const MachineInstr &MI) { - if (Trace) { - int ThisN = MI.getParent()->getNumber(); - dbgs() << "Visit MI(BB#" << ThisN << "): " << MI; - } + if (Trace) + dbgs() << "Visit MI(" << printMBBReference(*MI.getParent()) << "): " << MI; if (MI.isDebugValue()) return; assert(!MI.isBranch() && "Unexpected branch instruction"); @@ -897,7 +896,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) { BTs.clear(); const MachineInstr &MI = *It; if (Trace) - dbgs() << "Visit BR(BB#" << ThisN << "): " << MI; + dbgs() << "Visit BR(" << printMBBReference(B) << "): " << MI; assert(MI.isBranch() && "Expecting branch instruction"); InstrExec.insert(&MI); bool Eval = ME.evaluate(MI, Map, BTs, FallsThrough); @@ -913,7 +912,7 @@ void BT::visitBranchesFrom(const MachineInstr &BI) { if (Trace) { dbgs() << " adding targets:"; for (unsigned i = 0, n = BTs.size(); i < n; ++i) - dbgs() << " BB#" << BTs[i]->getNumber(); + dbgs() << " " << printMBBReference(*BTs[i]); if (FallsThrough) dbgs() << "\n falls through\n"; else diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp index d3cb53e3594..f14beaad339 100644 --- a/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -2977,7 +2977,7 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, } bool HexagonLoopRescheduling::processLoop(LoopCand &C) { - DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n"); + DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB) << "\n"); std::vector Phis; for (auto &I : *C.LB) { if (!I.isPHI()) diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp index 9a8762a48fd..80db36071db 100644 --- a/lib/Target/Hexagon/HexagonConstPropagation.cpp +++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -617,7 +617,7 @@ void MachineConstPropagator::CellMap::print(raw_ostream &os, void MachineConstPropagator::visitPHI(const MachineInstr &PN) { const MachineBasicBlock *MB = PN.getParent(); unsigned MBN = MB->getNumber(); - DEBUG(dbgs() << "Visiting FI(BB#" << MBN << "): " << PN); + DEBUG(dbgs() << "Visiting FI(" << printMBBReference(*MB) << "): " << PN); const MachineOperand &MD = PN.getOperand(0); Register DefR(MD); @@ 
-642,8 +642,8 @@ Bottomize: const MachineBasicBlock *PB = PN.getOperand(i+1).getMBB(); unsigned PBN = PB->getNumber(); if (!EdgeExec.count(CFGEdge(PBN, MBN))) { - DEBUG(dbgs() << " edge BB#" << PBN << "->BB#" << MBN - << " not executable\n"); + DEBUG(dbgs() << " edge " << printMBBReference(*PB) << "->" + << printMBBReference(*MB) << " not executable\n"); continue; } const MachineOperand &SO = PN.getOperand(i); @@ -658,9 +658,8 @@ Bottomize: LatticeCell SrcC; bool Eval = MCE.evaluate(UseR, Cells.get(UseR.Reg), SrcC); - DEBUG(dbgs() << " edge from BB#" << PBN << ": " - << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) - << SrcC << '\n'); + DEBUG(dbgs() << " edge from " << printMBBReference(*PB) << ": " + << printReg(UseR.Reg, &MCE.TRI, UseR.SubReg) << SrcC << '\n'); Changed |= Eval ? DefC.meet(SrcC) : DefC.setBottom(); Cells.update(DefR.Reg, DefC); @@ -672,7 +671,7 @@ Bottomize: } void MachineConstPropagator::visitNonBranch(const MachineInstr &MI) { - DEBUG(dbgs() << "Visiting MI(BB#" << MI.getParent()->getNumber() + DEBUG(dbgs() << "Visiting MI(" << printMBBReference(*MI.getParent()) << "): " << MI); CellMap Outputs; bool Eval = MCE.evaluate(MI, Cells, Outputs); @@ -729,8 +728,8 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) { while (It != End) { const MachineInstr &MI = *It; InstrExec.insert(&MI); - DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "(BB#" - << MBN << "): " << MI); + DEBUG(dbgs() << "Visiting " << (EvalOk ? "BR" : "br") << "(" + << printMBBReference(B) << "): " << MI); // Do not evaluate subsequent branches if the evaluation of any of the // previous branches failed. Keep iterating over the branches only // to mark them as executable. @@ -772,7 +771,8 @@ void MachineConstPropagator::visitBranchesFrom(const MachineInstr &BrI) { for (const MachineBasicBlock *TB : Targets) { unsigned TBN = TB->getNumber(); - DEBUG(dbgs() << " pushing edge BB#" << MBN << " -> BB#" << TBN << "\n"); + DEBUG(dbgs() << " pushing edge " << printMBBReference(B) << " -> " + << printMBBReference(*TB) << "\n"); FlowQ.push(CFGEdge(MBN, TBN)); } } @@ -870,8 +870,10 @@ void MachineConstPropagator::propagate(MachineFunction &MF) { CFGEdge Edge = FlowQ.front(); FlowQ.pop(); - DEBUG(dbgs() << "Picked edge BB#" << Edge.first << "->BB#" - << Edge.second << '\n'); + DEBUG(dbgs() << "Picked edge " + << printMBBReference(*MF.getBlockNumbered(Edge.first)) << "->" + << printMBBReference(*MF.getBlockNumbered(Edge.second)) + << '\n'); if (Edge.first != EntryNum) if (EdgeExec.count(Edge)) continue; @@ -934,7 +936,8 @@ void MachineConstPropagator::propagate(MachineFunction &MF) { for (const MachineBasicBlock *SB : B.successors()) { unsigned SN = SB->getNumber(); if (!EdgeExec.count(CFGEdge(BN, SN))) - dbgs() << " BB#" << BN << " -> BB#" << SN << '\n'; + dbgs() << " " << printMBBReference(B) << " -> " + << printMBBReference(*SB) << '\n'; } } }); @@ -3126,7 +3129,7 @@ bool HexagonConstEvaluator::rewriteHexBranch(MachineInstr &BrI, if (BrI.getOpcode() == Hexagon::J2_jump) return false; - DEBUG(dbgs() << "Rewrite(BB#" << B.getNumber() << "):" << BrI); + DEBUG(dbgs() << "Rewrite(" << printMBBReference(B) << "):" << BrI); bool Rewritten = false; if (NumTargets > 0) { assert(!FallsThru && "This should have been checked before"); diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp index 4a6100d02fc..652ea13c414 100644 --- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -27,24 +27,24 @@ // // %40 = 
L2_loadrub_io %39, 1 // %41 = S2_tstbit_i %40, 0 -// J2_jumpt %41, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#4(62) BB#5(62) +// J2_jumpt %41, <%bb.5>, %pc +// J2_jump <%bb.4>, %pc +// Successors according to CFG: %bb.4(62) %bb.5(62) // -// BB#4: derived from LLVM BB %if.then -// Predecessors according to CFG: BB#3 +// %bb.4: derived from LLVM BB %if.then +// Predecessors according to CFG: %bb.3 // %11 = A2_addp %6, %10 // S2_storerd_io %32, 16, %11 -// Successors according to CFG: BB#5 +// Successors according to CFG: %bb.5 // -// BB#5: derived from LLVM BB %if.end -// Predecessors according to CFG: BB#3 BB#4 -// %12 = PHI %6, , %11, +// %bb.5: derived from LLVM BB %if.end +// Predecessors according to CFG: %bb.3 %bb.4 +// %12 = PHI %6, <%bb.3>, %11, <%bb.4> // %13 = A2_addp %7, %12 // %42 = C2_cmpeqi %9, 10 -// J2_jumpf %42, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#6(4) BB#3(124) +// J2_jumpf %42, <%bb.3>, %pc +// J2_jump <%bb.6>, %pc +// Successors according to CFG: %bb.6(4) %bb.3(124) // // would become: // @@ -55,9 +55,9 @@ // %46 = PS_pselect %41, %6, %11 // %13 = A2_addp %7, %46 // %42 = C2_cmpeqi %9, 10 -// J2_jumpf %42, , %pc -// J2_jump , %pc -// Successors according to CFG: BB#6 BB#3 +// J2_jumpf %42, <%bb.3>, %pc +// J2_jump <%bb.6>, %pc +// Successors according to CFG: %bb.6 %bb.3 #include "Hexagon.h" #include "HexagonInstrInfo.h" @@ -238,7 +238,7 @@ bool HexagonEarlyIfConversion::isPreheader(const MachineBasicBlock *B) const { bool HexagonEarlyIfConversion::matchFlowPattern(MachineBasicBlock *B, MachineLoop *L, FlowPattern &FP) { - DEBUG(dbgs() << "Checking flow pattern at BB#" << B->getNumber() << "\n"); + DEBUG(dbgs() << "Checking flow pattern at " << printMBBReference(*B) << "\n"); // Interested only in conditional branches, no .new, no new-value, etc. // Check the terminators directly, it's easier than handling all responses diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 86645ddf913..78c7c102e7d 100644 --- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -654,7 +654,7 @@ bool HexagonExpandCondsets::split(MachineInstr &MI, return false; TfrCounter++; } - DEBUG(dbgs() << "\nsplitting BB#" << MI.getParent()->getNumber() << ": " + DEBUG(dbgs() << "\nsplitting " << printMBBReference(*MI.getParent()) << ": " << MI); MachineOperand &MD = MI.getOperand(0); // Definition MachineOperand &MP = MI.getOperand(1); // Predicate register diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index ebb7add82e1..a6a950ea045 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -443,7 +443,7 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF, DEBUG({ dbgs() << "Blocks needing SF: {"; for (auto &B : SFBlocks) - dbgs() << " BB#" << B->getNumber(); + dbgs() << " " << printMBBReference(*B); dbgs() << " }\n"; }); // No frame needed? 
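
[Note] The CFG listings in the HexagonEarlyIfConv comment and the "Blocks needing SF" dump just above both print whole collections of blocks in the new form. A sketch of emitting such a list for a block's successors (invented helper name, branch probabilities omitted):

    // Sketch: printing a successor list in the same style as the listings above.
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    static void dumpSuccessors(const llvm::MachineBasicBlock &MBB) {
      llvm::dbgs() << "Successors according to CFG:";
      for (const llvm::MachineBasicBlock *Succ : MBB.successors())
        llvm::dbgs() << ' ' << llvm::printMBBReference(*Succ);
      llvm::dbgs() << '\n';
    }
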
@@ -464,12 +464,16 @@ void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF, break; } DEBUG({ - dbgs() << "Computed dom block: BB#"; - if (DomB) dbgs() << DomB->getNumber(); - else dbgs() << ""; - dbgs() << ", computed pdom block: BB#"; - if (PDomB) dbgs() << PDomB->getNumber(); - else dbgs() << ""; + dbgs() << "Computed dom block: "; + if (DomB) + dbgs() << printMBBReference(*DomB); + else + dbgs() << ""; + dbgs() << ", computed pdom block: "; + if (PDomB) + dbgs() << printMBBReference(*PDomB); + else + dbgs() << ""; dbgs() << "\n"; }); if (!DomB || !PDomB) @@ -2010,7 +2014,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, auto P = BlockIndexes.insert( std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B))); auto &IndexMap = P.first->second; - DEBUG(dbgs() << "Index map for BB#" << B.getNumber() << "\n" + DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n" << IndexMap << '\n'); for (auto &In : B) { @@ -2129,7 +2133,8 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, else dbgs() << "\n"; for (auto &R : P.second.Map) - dbgs() << " BB#" << R.first->getNumber() << " { " << R.second << "}\n"; + dbgs() << " " << printMBBReference(*R.first) << " { " << R.second + << "}\n"; } }); @@ -2162,7 +2167,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, auto &FIs = P.second; if (FIs.empty()) continue; - dbgs() << " BB#" << P.first->getNumber() << ": {"; + dbgs() << " " << printMBBReference(*P.first) << ": {"; for (auto I : FIs) { dbgs() << " fi#" << I; if (LoxFIs.count(I)) @@ -2183,7 +2188,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, HexagonBlockRanges::InstrIndexMap &IM = F->second; HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM); HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM); - DEBUG(dbgs() << "BB#" << B.getNumber() << " dead map\n" + DEBUG(dbgs() << printMBBReference(B) << " dead map\n" << HexagonBlockRanges::PrintRangeMap(DM, HRI)); for (auto FI : BlockFIMap[&B]) { diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp index d1f63699292..99f3a2e9e88 100644 --- a/lib/Target/Hexagon/HexagonGenInsert.cpp +++ b/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -915,7 +915,7 @@ bool HexagonGenInsert::findRecordInsertForms(unsigned VR, void HexagonGenInsert::collectInBlock(MachineBasicBlock *B, OrderedRegisterList &AVs) { if (isDebug()) - dbgs() << "visiting block BB#" << B->getNumber() << "\n"; + dbgs() << "visiting block " << printMBBReference(*B) << "\n"; // First, check if this block is reachable at all. If not, the bit tracker // will not have any information about registers in it. 
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 5c18cc8732d..b5fa0689d04 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -1011,7 +1011,7 @@ bool HexagonHardwareLoops::isInvalidLoopOperation(const MachineInstr *MI, bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L, bool IsInnerHWLoop) const { const std::vector &Blocks = L->getBlocks(); - DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber();); + DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0])); for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { MachineBasicBlock *MBB = Blocks[i]; for (MachineBasicBlock::iterator @@ -1367,7 +1367,7 @@ bool HexagonHardwareLoops::isLoopFeeder(MachineLoop *L, MachineBasicBlock *A, LoopFeederMap &LoopFeederPhi) const { if (LoopFeederPhi.find(MO->getReg()) == LoopFeederPhi.end()) { const std::vector &Blocks = L->getBlocks(); - DEBUG(dbgs() << "\nhw_loop head, BB#" << Blocks[0]->getNumber();); + DEBUG(dbgs() << "\nhw_loop head, " << printMBBReference(*Blocks[0])); // Ignore all BBs that form Loop. for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { MachineBasicBlock *MBB = Blocks[i]; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 4cdfd09c095..cb00bc770c0 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -463,7 +463,7 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB, Cond.push_back(LastInst->getOperand(1)); return false; } - DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber() + DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB) << " with one jump\n";); // Otherwise, don't know what this is. return true; @@ -511,7 +511,7 @@ bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB, FBB = LastInst->getOperand(0).getMBB(); return false; } - DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber() + DEBUG(dbgs() << "\nCant analyze " << printMBBReference(MBB) << " with two jumps";); // Otherwise, can't handle this. return true; @@ -521,7 +521,7 @@ unsigned HexagonInstrInfo::removeBranch(MachineBasicBlock &MBB, int *BytesRemoved) const { assert(!BytesRemoved && "code size not handled"); - DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber()); + DEBUG(dbgs() << "\nRemoving branches out of " << printMBBReference(MBB)); MachineBasicBlock::iterator I = MBB.end(); unsigned Count = 0; while (I != MBB.begin()) { @@ -593,7 +593,7 @@ unsigned HexagonInstrInfo::insertBranch(MachineBasicBlock &MBB, // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset) // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset) unsigned Flags1 = getUndefRegState(Cond[1].isUndef()); - DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber();); + DEBUG(dbgs() << "\nInserting NVJump for " << printMBBReference(MBB);); if (Cond[2].isReg()) { unsigned Flags2 = getUndefRegState(Cond[2].isUndef()); BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1). @@ -829,9 +829,8 @@ void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB, #ifndef NDEBUG // Show the invalid registers to ease debugging. 
- dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber() - << ": " << printReg(DestReg, &HRI) - << " = " << printReg(SrcReg, &HRI) << '\n'; + dbgs() << "Invalid registers for copy in " << printMBBReference(MBB) << ": " + << printReg(DestReg, &HRI) << " = " << printReg(SrcReg, &HRI) << '\n'; #endif llvm_unreachable("Unimplemented"); } @@ -4032,8 +4031,9 @@ void HexagonInstrInfo::immediateExtend(MachineInstr &MI) const { bool HexagonInstrInfo::invertAndChangeJumpTarget( MachineInstr &MI, MachineBasicBlock *NewTarget) const { - DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#" - << NewTarget->getNumber(); MI.dump();); + DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to " + << printMBBReference(*NewTarget); + MI.dump();); assert(MI.isBranch()); unsigned NewOpcode = getInvertedPredicatedOpcode(MI.getOpcode()); int TargetPos = MI.getNumOperands() - 1; diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp index 5daceac6496..8765fc98448 100644 --- a/lib/Target/Hexagon/HexagonMachineScheduler.cpp +++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -186,12 +186,10 @@ bool VLIWResourceModel::reserveResources(SUnit *SU) { /// after setting up the current scheduling region. [RegionBegin, RegionEnd) /// only includes instructions that have DAG nodes, not scheduling boundaries. void VLIWMachineScheduler::schedule() { - DEBUG(dbgs() - << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber() - << " " << BB->getName() - << " in_func " << BB->getParent()->getFunction()->getName() - << " at loop depth " << MLI->getLoopDepth(BB) - << " \n"); + DEBUG(dbgs() << "********** MI Converging Scheduling VLIW " + << printMBBReference(*BB) << " " << BB->getName() << " in_func " + << BB->getParent()->getFunction()->getName() << " at loop depth " + << MLI->getLoopDepth(BB) << " \n"); buildDAGWithRegPressure(); @@ -237,8 +235,8 @@ void VLIWMachineScheduler::schedule() { placeDebugValues(); DEBUG({ - unsigned BBNum = begin()->getParent()->getNumber(); - dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; + dbgs() << "*** Final schedule for " + << printMBBReference(*begin()->getParent()) << " ***\n"; dumpSchedule(); dbgs() << '\n'; }); diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp index f42b6ed9935..d97ed4812f2 100644 --- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -461,7 +461,7 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr AddAslUN, DEBUG(dbgs() << "[InstrNode]: " << Print>(UseIA, *DFG) << "\n"); MachineInstr *UseMI = UseIA.Addr->getCode(); - DEBUG(dbgs() << "[MI getParent()->getNumber() + DEBUG(dbgs() << "[MI <" << printMBBReference(*UseMI->getParent()) << ">]: " << *UseMI << "\n"); const MCInstrDesc &UseMID = UseMI->getDesc(); assert(HII->getAddrMode(*UseMI) == HexagonII::BaseImmOffset); @@ -570,7 +570,7 @@ bool HexagonOptAddrMode::processBlock(NodeAddr BA) { NodeAddr OwnerN = UseN.Addr->getOwner(*DFG); MachineInstr *UseMI = OwnerN.Addr->getCode(); - DEBUG(dbgs() << "\t\t[MI getParent()->getNumber() + DEBUG(dbgs() << "\t\t[MI <" << printMBBReference(*UseMI->getParent()) << ">]: " << *UseMI << "\n"); int UseMOnum = -1; diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp index 354bb95e448..7f82a5c4c4d 100644 --- a/lib/Target/Hexagon/HexagonPeephole.cpp +++ b/lib/Target/Hexagon/HexagonPeephole.cpp @@ -20,19 +20,18 @@ // ... // %16 = NOT_p %15 // ... 
-// JMP_c %16, , %pc +// JMP_c %16, <%bb.1>, %pc // // Into // %15 = CMPGTrr %6, %2; // ... -// JMP_cNot %15, , %pc; +// JMP_cNot %15, <%bb.1>, %pc; // // Note: The peephole pass makes the instrucstions like // %170 = SXTW %166 or %16 = NOT_p %15 // redundant and relies on some form of dead removal instructions, like // DCE or DIE to actually eliminate them. - //===----------------------------------------------------------------------===// #include "Hexagon.h" diff --git a/lib/Target/Hexagon/HexagonSplitDouble.cpp b/lib/Target/Hexagon/HexagonSplitDouble.cpp index 75d6750322b..68b5ddd4438 100644 --- a/lib/Target/Hexagon/HexagonSplitDouble.cpp +++ b/lib/Target/Hexagon/HexagonSplitDouble.cpp @@ -536,7 +536,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L, Rs.insert(CmpR2); DEBUG({ - dbgs() << "For loop at BB#" << HB->getNumber() << " ind regs: "; + dbgs() << "For loop at " << printMBBReference(*HB) << " ind regs: "; dump_partition(dbgs(), Rs, *TRI); dbgs() << '\n'; }); diff --git a/lib/Target/Hexagon/RDFGraph.cpp b/lib/Target/Hexagon/RDFGraph.cpp index 50ebcd5302c..8513ebd1c76 100644 --- a/lib/Target/Hexagon/RDFGraph.cpp +++ b/lib/Target/Hexagon/RDFGraph.cpp @@ -247,7 +247,7 @@ raw_ostream &operator<< (raw_ostream &OS, if (T != MI.operands_end()) { OS << ' '; if (T->isMBB()) - OS << "BB#" << T->getMBB()->getNumber(); + OS << printMBBReference(*T->getMBB()); else if (T->isGlobal()) OS << T->getGlobal()->getName(); else if (T->isSymbol()) @@ -284,13 +284,13 @@ raw_ostream &operator<< (raw_ostream &OS, auto PrintBBs = [&OS] (std::vector Ns) -> void { unsigned N = Ns.size(); for (int I : Ns) { - OS << "BB#" << I; + OS << "%bb." << I; if (--N) OS << ", "; } }; - OS << Print(P.Obj.Id, P.G) << ": --- BB#" << BB->getNumber() + OS << Print(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB) << " --- preds(" << NP << "): "; for (MachineBasicBlock *B : BB->predecessors()) Ns.push_back(B->getNumber()); @@ -1123,8 +1123,8 @@ void DataFlowGraph::pushDefs(NodeAddr IA, DefStackMap &DefM) { if (!Defined.insert(RR.Reg).second) { MachineInstr *MI = NodeAddr(IA).Addr->getCode(); dbgs() << "Multiple definitions of register: " - << Print(RR, *this) << " in\n " << *MI - << "in BB#" << MI->getParent()->getNumber() << '\n'; + << Print(RR, *this) << " in\n " << *MI << "in " + << printMBBReference(*MI->getParent()) << '\n'; llvm_unreachable(nullptr); } #endif diff --git a/lib/Target/Hexagon/RDFGraph.h b/lib/Target/Hexagon/RDFGraph.h index 399b401c5ff..25c4b67230a 100644 --- a/lib/Target/Hexagon/RDFGraph.h +++ b/lib/Target/Hexagon/RDFGraph.h @@ -111,7 +111,7 @@ // // DFG dump:[ // f1: Function foo -// b2: === BB#0 === preds(0), succs(0): +// b2: === %bb.0 === preds(0), succs(0): // p3: phi [d4(,d12,u9):] // p5: phi [d6(,,u10):] // s7: add [d8(,,u13):, u9(d4):, u10(d6):] diff --git a/lib/Target/Hexagon/RDFLiveness.cpp b/lib/Target/Hexagon/RDFLiveness.cpp index 740cd11136b..13d9a174197 100644 --- a/lib/Target/Hexagon/RDFLiveness.cpp +++ b/lib/Target/Hexagon/RDFLiveness.cpp @@ -814,7 +814,7 @@ void Liveness::computeLiveIns() { for (auto I = B.livein_begin(), E = B.livein_end(); I != E; ++I) LV.push_back(RegisterRef(I->PhysReg, I->LaneMask)); std::sort(LV.begin(), LV.end()); - dbgs() << "BB#" << B.getNumber() << "\t rec = {"; + dbgs() << printMBBReference(B) << "\t rec = {"; for (auto I : LV) dbgs() << ' ' << Print(I, DFG); dbgs() << " }\n"; @@ -963,7 +963,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) { } if (Trace) { - dbgs() << "\n-- BB#" << B->getNumber() << ": " << 
__func__ + dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__ << " after recursion into: {"; for (auto I : *N) dbgs() << ' ' << I->getBlock()->getNumber(); diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp index 424b5ae418f..87c320aa76a 100644 --- a/lib/Target/MSP430/MSP430BranchSelector.cpp +++ b/lib/Target/MSP430/MSP430BranchSelector.cpp @@ -138,15 +138,15 @@ bool MSP430BSel::expandBranches(OffsetVector &BlockOffsets) { continue; } - DEBUG(dbgs() << " Found a branch that needs expanding, BB#" - << DestBB->getNumber() << ", Distance " << BranchDistance - << "\n"); + DEBUG(dbgs() << " Found a branch that needs expanding, " + << printMBBReference(*DestBB) << ", Distance " + << BranchDistance << "\n"); // If JCC is not the last instruction we need to split the MBB. if (MI->getOpcode() == MSP430::JCC && std::next(MI) != EE) { - DEBUG(dbgs() << " Found a basic block that needs to be split, BB#" - << MBB->getNumber() << "\n"); + DEBUG(dbgs() << " Found a basic block that needs to be split, " + << printMBBReference(*MBB) << "\n"); // Create a new basic block. MachineBasicBlock *NewBB = diff --git a/lib/Target/Mips/MipsConstantIslandPass.cpp b/lib/Target/Mips/MipsConstantIslandPass.cpp index 257e8f45a70..4dad98b80ed 100644 --- a/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -430,7 +430,7 @@ bool MipsConstantIslands::isOffsetInRange LLVM_DUMP_METHOD void MipsConstantIslands::dumpBBs() { for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) { const BasicBlockInfo &BBI = BBInfo[J]; - dbgs() << format("%08x BB#%u\t", BBI.Offset, J) + dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) << format(" size=%#x\n", BBInfo[J].Size); } } @@ -991,11 +991,11 @@ bool MipsConstantIslands::isCPEntryInRange const BasicBlockInfo &BBI = BBInfo[Block]; dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm() << " max delta=" << MaxDisp - << format(" insn address=%#x", UserOffset) - << " in BB#" << Block << ": " + << format(" insn address=%#x", UserOffset) << " in " + << printMBBReference(*MI->getParent()) << ": " << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI << format("CPE address=%#x offset=%+d: ", CPEOffset, - int(CPEOffset-UserOffset)); + int(CPEOffset - UserOffset)); }); } @@ -1197,7 +1197,7 @@ bool MipsConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset, // This is the least amount of required padding seen so far. BestGrowth = Growth; WaterIter = IP; - DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber() + DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB) << " Growth=" << Growth << '\n'); // Keep looking unless it is perfect. @@ -1236,8 +1236,8 @@ void MipsConstantIslands::createNewWater(unsigned CPUserIndex, unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta; if (isOffsetInRange(UserOffset, CPEOffset, U)) { - DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber() - << format(", expected CPE offset %#x\n", CPEOffset)); + DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB) + << format(", expected CPE offset %#x\n", CPEOffset)); NewMBB = &*++UserMBB->getIterator(); // Add an unconditional branch from UserMBB to fallthrough block. 
Record // it for branch lengthening; this new branch will not get out of range, @@ -1470,11 +1470,11 @@ bool MipsConstantIslands::isBBInRange unsigned BrOffset = getOffsetOf(MI) + PCAdj; unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset; - DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber() - << " from BB#" << MI->getParent()->getNumber() - << " max delta=" << MaxDisp - << " from " << getOffsetOf(MI) << " to " << DestOffset - << " offset " << int(DestOffset-BrOffset) << "\t" << *MI); + DEBUG(dbgs() << "Branch of destination " << printMBBReference(*DestBB) + << " from " << printMBBReference(*MI->getParent()) + << " max delta=" << MaxDisp << " from " << getOffsetOf(MI) + << " to " << DestOffset << " offset " + << int(DestOffset - BrOffset) << "\t" << *MI); if (BrOffset <= DestOffset) { // Branch before the Dest. @@ -1615,9 +1615,9 @@ MipsConstantIslands::fixupConditionalBr(ImmBranch &Br) { } MachineBasicBlock *NextBB = &*++MBB->getIterator(); - DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber() - << " also invert condition and change dest. to BB#" - << NextBB->getNumber() << "\n"); + DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB) + << " also invert condition and change dest. to " + << printMBBReference(*NextBB) << "\n"); // Insert a new conditional branch and a new unconditional branch. // Also update the ImmBranch as well as adding a new entry for the new branch. diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp index 4c101f58601..cd078972307 100644 --- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp +++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp @@ -59,45 +59,45 @@ namespace llvm { /// /// expands to the following machine code: /// -/// BB#0: derived from LLVM BB %entry +/// %bb.0: derived from LLVM BB %entry /// Live Ins: %f1 %f3 %x6 /// /// %0 = COPY %f1; F8RC:%0 /// %5 = CMPLWI %4, 0; CRRC:%5 GPRC:%4 /// %8 = LXSDX %zero8, %7, %rm; /// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 -/// BCC 76, %5, <BB#2>; CRRC:%5 -/// Successors according to CFG: BB#1(?%) BB#2(?%) +/// BCC 76, %5, <%bb.2>; CRRC:%5 +/// Successors according to CFG: %bb.1(?%) %bb.2(?%) /// -/// BB#1: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 -/// Successors according to CFG: BB#2(?%) +/// %bb.1: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 +/// Successors according to CFG: %bb.2(?%) /// -/// BB#2: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 BB#1 -/// %9 = PHI %8, <BB#1>, %0, <BB#0>; +/// %bb.2: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 %bb.1 +/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>; /// F8RC:%9,%8,%0 /// -/// BCC 76, %5, <BB#4>; CRRC:%5 -/// Successors according to CFG: BB#3(?%) BB#4(?%) +/// BCC 76, %5, <%bb.4>; CRRC:%5 +/// Successors according to CFG: %bb.3(?%) %bb.4(?%) /// -/// BB#3: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#2 -/// Successors according to CFG: BB#4(?%) +/// %bb.3: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.2 +/// Successors according to CFG: %bb.4(?%) /// -/// BB#4: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#2 BB#3 -/// %13 = PHI %12, <BB#3>, %2, <BB#2>; +/// %bb.4: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.2 %bb.3 +/// %13 = PHI %12, <%bb.3>, %2, <%bb.2>; /// F8RC:%13,%12,%2 /// /// BLR8 %lr8, %rm, %f1 /// /// When this pattern is detected, branch coalescing will try to collapse -/// it by moving code in BB#2 to BB#0 and/or BB#4 and removing BB#3.
+/// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3. /// /// If all conditions are meet, IR should collapse to: /// -/// BB#0: derived from LLVM BB %entry +/// %bb.0: derived from LLVM BB %entry /// Live Ins: %f1 %f3 %x6 /// /// %0 = COPY %f1; F8RC:%0 @@ -105,19 +105,19 @@ namespace llvm { /// %8 = LXSDX %zero8, %7, %rm; /// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 /// -/// BCC 76, %5, <BB#4>; CRRC:%5 -/// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%) -/// BB#4(0x55555554 / 0x80000000 = 66.67%) +/// BCC 76, %5, <%bb.4>; CRRC:%5 +/// Successors according to CFG: %bb.1(0x2aaaaaaa / 0x80000000 = 33.33%) +/// %bb.4(0x55555554 / 0x80000000 = 66.67%) /// -/// BB#1: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 -/// Successors according to CFG: BB#4(0x40000000 / 0x80000000 = 50.00%) +/// %bb.1: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 +/// Successors according to CFG: %bb.4(0x40000000 / 0x80000000 = 50.00%) /// -/// BB#4: derived from LLVM BB %entry -/// Predecessors according to CFG: BB#0 BB#1 -/// %9 = PHI %8, <BB#1>, %0, <BB#0>; +/// %bb.4: derived from LLVM BB %entry +/// Predecessors according to CFG: %bb.0 %bb.1 +/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>; /// F8RC:%9,%8,%0 -/// %13 = PHI %12, <BB#1>, %2, <BB#0>; +/// %13 = PHI %12, <%bb.1>, %2, <%bb.0>; /// F8RC:%13,%12,%2 /// /// BLR8 %lr8, %rm, %f1 diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index 8784a831902..fc638829378 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -690,12 +690,11 @@ check_block: } if (I != BI && clobbersCTR(*I)) { - DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" << - MBB->getFullName() << ") instruction " << *I << - " clobbers CTR, invalidating " << "BB#" << - BI->getParent()->getNumber() << " (" << - BI->getParent()->getFullName() << ") instruction " << - *BI << "\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " (" << MBB->getFullName() + << ") instruction " << *I << " clobbers CTR, invalidating " + << printMBBReference(*BI->getParent()) << " (" + << BI->getParent()->getFullName() << ") instruction " << *BI + << "\n"); return false; } @@ -709,10 +708,10 @@ check_block: if (CheckPreds) { queue_preds: if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) { - DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" << - BI->getParent()->getNumber() << " (" << - BI->getParent()->getFullName() << ") instruction " << - *BI << "\n"); + DEBUG(dbgs() << "Unable to find a MTCTR instruction for " + << printMBBReference(*BI->getParent()) << " (" + << BI->getParent()->getFullName() << ") instruction " << *BI + << "\n"); return false; } diff --git a/lib/Target/PowerPC/PPCExpandISEL.cpp b/lib/Target/PowerPC/PPCExpandISEL.cpp index 41e3190c3ee..dfd2b9bfd05 100644 --- a/lib/Target/PowerPC/PPCExpandISEL.cpp +++ b/lib/Target/PowerPC/PPCExpandISEL.cpp @@ -171,7 +171,7 @@ bool PPCExpandISEL::collectISELInstructions() { #ifndef NDEBUG void PPCExpandISEL::DumpISELInstructions() const { for (const auto &I : ISELInstructions) { - DEBUG(dbgs() << "BB#" << I.first << ":\n"); + DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(I.first)) << ":\n"); for (const auto &VI : I.second) DEBUG(dbgs() << " "; VI->print(dbgs())); } @@ -191,7 +191,11 @@ bool PPCExpandISEL::canMerge(MachineInstr *PrevPushedMI, MachineInstr *MI) { void PPCExpandISEL::expandAndMergeISELs() { for (auto &BlockList : ISELInstructions) { - DEBUG(dbgs() << "Expanding ISEL instructions in BB#" << BlockList.first +
+ DEBUG(dbgs() << printMBBReference(*MF->getBlockNumbered(BlockList.first)) + << ":\n"); + DEBUG(dbgs() << "Expanding ISEL instructions in " + << printMBBReference(*MF->getBlockNumbered(BlockList.first)) << "\n"); BlockISELList &CurrentISELList = BlockList.second; diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp index 1ac7afe2cdc..c6fcea7c956 100644 --- a/lib/Target/PowerPC/PPCMIPeephole.cpp +++ b/lib/Target/PowerPC/PPCMIPeephole.cpp @@ -686,7 +686,7 @@ bool PPCMIPeephole::simplifyCode(void) { DEBUG(LiMI->dump()); // There could be repeated registers in the PHI, e.g: %1 = - // PHI %6, <BB#2>, %8, <BB#3>, %8, <BB#6>; So if we've + // PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've // already replaced the def instruction, skip. if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8) continue; @@ -1209,8 +1209,9 @@ bool PPCMIPeephole::eliminateRedundantCompare(void) { DEBUG(BI1->dump()); DEBUG(BI2->dump()); if (IsPartiallyRedundant) { - DEBUG(dbgs() << "The following compare is moved into BB#" << - MBBtoMoveCmp->getNumber() << " to handle partial redundancy.\n"); + DEBUG(dbgs() << "The following compare is moved into " + << printMBBReference(*MBBtoMoveCmp) + << " to handle partial redundancy.\n"); DEBUG(CMPI2->dump()); } diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp index c51368d6d2a..0320ecaf853 100644 --- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp +++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp @@ -966,7 +966,7 @@ LLVM_DUMP_METHOD void PPCVSXSwapRemoval::dumpSwapVector() { dbgs() << format("%6d", ID); dbgs() << format("%6d", EC->getLeaderValue(ID)); - dbgs() << format(" BB#%3d", MI->getParent()->getNumber()); + dbgs() << format(" %bb.%3d", MI->getParent()->getNumber()); dbgs() << format(" %14s ", TII->getName(MI->getOpcode()).str().c_str()); if (SwapVector[EntryIdx].IsLoad) diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt index bc09d5f8a7e..b4bf635dc2c 100644 --- a/lib/Target/PowerPC/README.txt +++ b/lib/Target/PowerPC/README.txt @@ -256,7 +256,7 @@ _clamp0g: cmpwi cr0, r3, 0 li r2, 0 blt cr0, LBB1_2 -; BB#1: ; %entry +; %bb.1: ; %entry mr r2, r3 LBB1_2: ; %entry mr r3, r2 diff --git a/lib/Target/PowerPC/README_ALTIVEC.txt b/lib/Target/PowerPC/README_ALTIVEC.txt index f70ebd82bd5..c38e0192316 100644 --- a/lib/Target/PowerPC/README_ALTIVEC.txt +++ b/lib/Target/PowerPC/README_ALTIVEC.txt @@ -233,7 +233,7 @@ declare <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb(<16 x i8>, <16 x i8>) #1 Produces the following code with -mtriple=powerpc64-unknown-linux-gnu: -# BB#0: # %entry +# %bb.0: # %entry addis 3, 2, .LCPI0_0@toc@ha addis 4, 2, .LCPI0_1@toc@ha addi 3, 3, .LCPI0_0@toc@l diff --git a/lib/Target/README.txt b/lib/Target/README.txt index f0fd323bb58..563aee9e1a7 100644 --- a/lib/Target/README.txt +++ b/lib/Target/README.txt @@ -1778,7 +1778,7 @@ We do get this at the codegen level, so something knows about it, but instcombine should catch it earlier: _foo: ## @foo -## BB#0: ## %entry +## %bb.0: ## %entry movl %edi, %eax sarl $4, %eax ret @@ -2234,13 +2234,13 @@ void foo(funcs f, int which) { which we compile to: foo: # @foo -# BB#0: # %entry +# %bb.0: # %entry pushq %rbp movq %rsp, %rbp testl %esi, %esi movq %rdi, %rax je .LBB0_2 -# BB#1: # %if.then +# %bb.1: # %if.then movl $5, %edi callq *%rax popq %rbp diff --git a/lib/Target/SystemZ/SystemZMachineScheduler.cpp b/lib/Target/SystemZ/SystemZMachineScheduler.cpp index 4b0f9256763..08eb73fc362 100644 ---
a/lib/Target/SystemZ/SystemZMachineScheduler.cpp +++ b/lib/Target/SystemZ/SystemZMachineScheduler.cpp @@ -74,7 +74,7 @@ advanceTo(MachineBasicBlock::iterator NextBegin) { void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) { assert ((SchedStates.find(NextMBB) == SchedStates.end()) && "Entering MBB twice?"); - DEBUG (dbgs() << "+++ Entering MBB#" << NextMBB->getNumber()); + DEBUG(dbgs() << "+++ Entering " << printMBBReference(*NextMBB)); MBB = NextMBB; /// Create a HazardRec for MBB, save it in SchedStates and set HazardRec to @@ -93,8 +93,8 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) { SchedStates.find(SinglePredMBB) == SchedStates.end()) return; - DEBUG (dbgs() << "+++ Continued scheduling from MBB#" - << SinglePredMBB->getNumber() << "\n";); + DEBUG(dbgs() << "+++ Continued scheduling from " + << printMBBReference(*SinglePredMBB) << "\n";); HazardRec->copyState(SchedStates[SinglePredMBB]); @@ -113,7 +113,7 @@ void SystemZPostRASchedStrategy::enterMBB(MachineBasicBlock *NextMBB) { } void SystemZPostRASchedStrategy::leaveMBB() { - DEBUG (dbgs() << "+++ Leaving MBB#" << MBB->getNumber() << "\n";); + DEBUG(dbgs() << "+++ Leaving " << printMBBReference(*MBB) << "\n";); // Advance to first terminator. The successor block will handle terminators // dependent on CFG layout (T/NT branch etc). diff --git a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp index 41f315c2825..88daea7e368 100644 --- a/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp +++ b/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp @@ -205,8 +205,7 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF, continue; unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1; - DEBUG(dbgs() << "MBB#" << MBB->getNumber() << " has index " << Index - << "\n"); + DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n"); Pair.first->second = Index; for (auto Pred : MBB->predecessors()) diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index 799157c926e..11652af9f1f 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -987,11 +987,11 @@ bb7: ; preds = %entry to: foo: # @foo -# BB#0: # %entry +# %bb.0: # %entry movl 4(%esp), %ecx cmpb $0, 16(%esp) je .LBB0_2 -# BB#1: # %bb +# %bb.1: # %bb movl 8(%esp), %eax addl %ecx, %eax ret @@ -1073,7 +1073,7 @@ declare void @exit(i32) noreturn nounwind This compiles into: _abort_gzip: ## @abort_gzip -## BB#0: ## %entry +## %bb.0: ## %entry subl $12, %esp movb _in_exit.4870.b, %al cmpb $1, %al @@ -1396,7 +1396,7 @@ define i32 @bar(%struct.B* nocapture %a) nounwind readonly optsize { } bar: # @bar -# BB#0: +# %bb.0: movb (%rdi), %al andb $1, %al movzbl %al, %eax @@ -1633,7 +1633,7 @@ In the real code, we get a lot more wrong than this. 
However, even in this code we generate: _foo: ## @foo -## BB#0: ## %entry +## %bb.0: ## %entry movb (%rsi), %al movb (%rdi), %cl cmpb %al, %cl @@ -1646,12 +1646,12 @@ LBB0_2: ## %if.end movb 1(%rdi), %cl cmpb %al, %cl jne LBB0_1 -## BB#3: ## %if.end38 +## %bb.3: ## %if.end38 movb 2(%rsi), %al movb 2(%rdi), %cl cmpb %al, %cl jne LBB0_1 -## BB#4: ## %if.end60 +## %bb.4: ## %if.end60 movb 3(%rdi), %al cmpb 3(%rsi), %al LBB0_5: ## %if.end60 diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp index ce559323efc..2e39cb0d797 100644 --- a/lib/Target/X86/X86FixupBWInsts.cpp +++ b/lib/Target/X86/X86FixupBWInsts.cpp @@ -188,16 +188,17 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) { /// necessary (e.g. due to register coalescing with a "truncate" copy). /// So, it handles pattern like this: /// -/// BB#2: derived from LLVM BB %if.then +/// %bb.2: derived from LLVM BB %if.then /// Live Ins: %rdi -/// Predecessors according to CFG: BB#0 -/// %ax = MOV16rm %rdi, 1, %noreg, 0, %noreg, %eax; mem:LD2[%p] +/// Predecessors according to CFG: %bb.0 +/// %ax = MOV16rm %rdi, 1, %noreg, 0, %noreg, %eax; +/// mem:LD2[%p] /// No %eax -/// Successors according to CFG: BB#3(?%) +/// Successors according to CFG: %bb.3(?%) /// -/// BB#3: derived from LLVM BB %if.end +/// %bb.3: derived from LLVM BB %if.end /// Live Ins: %eax Only %ax is actually live -/// Predecessors according to CFG: BB#2 BB#1 +/// Predecessors according to CFG: %bb.2 %bb.1 /// %ax = KILL %ax, %eax /// RET 0, %ax static bool isLive(const MachineInstr &MI, diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 6db02f0bd05..b73a08846e9 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -499,7 +499,7 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { /// setupBlockStack - Use the live bundles to set up our model of the stack /// to match predecessors' live out stack. void FPS::setupBlockStack() { - DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber() + DEBUG(dbgs() << "\nSetting up live-ins for " << printMBBReference(*MBB) << " derived from " << MBB->getName() << ".\n"); StackTop = 0; // Get the live-in bundle for MBB. @@ -538,7 +538,7 @@ void FPS::finishBlockStack() { if (MBB->succ_empty()) return; - DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber() + DEBUG(dbgs() << "Setting up live-outs for " << printMBBReference(*MBB) << " derived from " << MBB->getName() << ".\n"); // Get MBB's live-out bundle. diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll index 7362bd817cc..e7868327975 100644 --- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -58,19 +58,19 @@ define void @allocai64() { ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. -; CHECK: {{bb.[0-9]+}}.entry: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; ; Make sure we have one successor and only one. -; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+.bb2]](0x80000000) +; CHECK-NEXT: successors: %[[BB2:bb.[0-9]+]](0x80000000) ; ; Check that we emit the correct branch. ; CHECK: G_BR %[[BB2]] ; ; Check that end contains the return instruction. 
-; CHECK: [[END:bb.[0-9]+.end]]: +; CHECK: [[END:bb.[0-9]+]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR ; -; CHECK: {{bb.[0-9]+}}.bb2: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: successors: %[[END]](0x80000000) ; CHECK: G_BR %[[END]] define void @uncondbr() { @@ -84,11 +84,11 @@ bb2: ; CHECK-LABEL: name: uncondbr_fallthrough ; CHECK: body: -; CHECK: {{bb.[0-9]+}}.entry: -; CHECK-NEXT: successors: %[[END:bb.[0-9]+.end]](0x80000000) +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[END:bb.[0-9]+]](0x80000000) ; We don't emit a branch here, as we can fallthrough to the successor. ; CHECK-NOT: G_BR -; CHECK: [[END]]: +; CHECK: [[END]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR define void @uncondbr_fallthrough() { entry: @@ -102,10 +102,10 @@ end: ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. -; CHECK: {{bb.[0-9]+}} (%ir-block.{{[0-9]+}}): +; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}): ; Make sure we have two successors -; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+.true]](0x40000000), -; CHECK: %[[FALSE:bb.[0-9]+.false]](0x40000000) +; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000), +; CHECK: %[[FALSE:bb.[0-9]+]](0x40000000) ; ; CHECK: [[ADDR:%.*]]:_(p0) = COPY %x0 ; @@ -115,9 +115,9 @@ end: ; CHECK: G_BR %[[FALSE]] ; ; Check that each successor contains the return instruction. -; CHECK: [[TRUE]]: +; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR -; CHECK: [[FALSE]]: +; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: RET_ReallyLR define void @condbr(i1* %tstaddr) { %tst = load i1, i1* %tstaddr @@ -133,8 +133,8 @@ false: ; CHECK-LABEL: name: switch ; CHECK: body: ; -; CHECK: {{bb.[0-9]+.entry}}: -; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+.case100]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+.entry]](0x40000000) +; CHECK: bb.{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+]](0x40000000) ; CHECK: %0:_(s32) = COPY %w0 ; CHECK: %[[reg100:[0-9]+]]:_(s32) = G_CONSTANT i32 100 ; CHECK: %[[reg200:[0-9]+]]:_(s32) = G_CONSTANT i32 200 @@ -145,31 +145,31 @@ false: ; CHECK: G_BRCOND %[[regicmp100]](s1), %[[BB_CASE100]] ; CHECK: G_BR %[[BB_NOTCASE100_CHECKNEXT]] ; -; CHECK: [[BB_NOTCASE100_CHECKNEXT]]: -; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+.case200]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+.entry]](0x40000000) +; CHECK: [[BB_NOTCASE100_CHECKNEXT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_CASE200:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE200_CHECKNEXT:bb.[0-9]+]](0x40000000) ; CHECK: %[[regicmp200:[0-9]+]]:_(s1) = G_ICMP intpred(eq), %[[reg200]](s32), %0 ; CHECK: G_BRCOND %[[regicmp200]](s1), %[[BB_CASE200]] ; CHECK: G_BR %[[BB_NOTCASE200_CHECKNEXT]] ; -; CHECK: [[BB_NOTCASE200_CHECKNEXT]]: -; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+.default]](0x80000000) +; CHECK: [[BB_NOTCASE200_CHECKNEXT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_DEFAULT:bb.[0-9]+]](0x80000000) ; CHECK: G_BR %[[BB_DEFAULT]] ; -; CHECK: [[BB_DEFAULT]]: -; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000) +; CHECK: [[BB_DEFAULT]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000) ; CHECK: %[[regretdefault:[0-9]+]]:_(s32) = G_ADD %0, %[[reg0]] ; CHECK: G_BR %[[BB_RET]] ; -; CHECK: [[BB_CASE100]]: -; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+.return]](0x80000000) +; CHECK: [[BB_CASE100]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[BB_RET:bb.[0-9]+]](0x80000000) ; CHECK: 
%[[regretc100:[0-9]+]]:_(s32) = G_ADD %0, %[[reg1]] ; CHECK: G_BR %[[BB_RET]] ; -; CHECK: [[BB_CASE200]]: +; CHECK: [[BB_CASE200]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: successors: %[[BB_RET]](0x80000000) ; CHECK: %[[regretc200:[0-9]+]]:_(s32) = G_ADD %0, %[[reg2]] ; -; CHECK: [[BB_RET]]: +; CHECK: [[BB_RET]].{{[a-zA-Z0-9.]+}}: ; CHECK-NEXT: %[[regret:[0-9]+]]:_(s32) = G_PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]] ; CHECK: %w0 = COPY %[[regret]](s32) ; CHECK: RET_ReallyLR implicit %w0 @@ -202,16 +202,16 @@ return: ; %entry block is no longer a predecessor for the phi instruction. We need to ; use the correct lowered MachineBasicBlock instead. ; CHECK-LABEL: name: test_cfg_remap -; CHECK: {{bb.[0-9]+.entry}}: -; CHECK-NEXT: successors: %{{bb.[0-9]+.next}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+.entry]](0x40000000) -; CHECK: [[NOTCASE1_BLOCK]]: -; CHECK-NEXT: successors: %{{bb.[0-9]+.other}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]](0x40000000) -; CHECK: [[NOTCASE57_BLOCK]]: -; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+.phi.block]](0x80000000) +; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE1_BLOCK:bb.[0-9]+]](0x40000000) +; CHECK: [[NOTCASE1_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %{{bb.[0-9]+}}(0x40000000), %[[NOTCASE57_BLOCK:bb.[0-9]+]](0x40000000) +; CHECK: [[NOTCASE57_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: successors: %[[PHI_BLOCK:bb.[0-9]+]](0x80000000) ; CHECK: G_BR %[[PHI_BLOCK]] ; -; CHECK: [[PHI_BLOCK]]: -; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+.entry]], %{{.*}}(s32), +; CHECK: [[PHI_BLOCK]].{{[a-zA-Z0-9.]+}}: +; CHECK-NEXT: G_PHI %{{.*}}(s32), %[[NOTCASE57_BLOCK:bb.[0-9]+]], %{{.*}}(s32), ; define i32 @test_cfg_remap(i32 %in) { entry: @@ -230,7 +230,7 @@ phi.block: } ; CHECK-LABEL: name: test_cfg_remap_multiple_preds -; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}.entry, [[ENTRY]](s32), %bb.{{[0-9]+}}.entry +; CHECK: G_PHI [[ENTRY:%.*]](s32), %bb.{{[0-9]+}}, [[ENTRY]](s32), %bb.{{[0-9]+}} define i32 @test_cfg_remap_multiple_preds(i32 %in) { entry: switch i32 %in, label %odd [i32 1, label %next @@ -256,19 +256,19 @@ phi.block: ; CHECK: body: ; ; ABI/constant lowering and IR-level entry basic block. 
-; CHECK: {{bb.[0-9]+.entry}}: +; CHECK: bb.{{[0-9]+.[a-zA-Z0-9.]+}}: ; Make sure we have one successor -; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+.L1]](0x80000000) +; CHECK-NEXT: successors: %[[BB_L1:bb.[0-9]+]](0x80000000) ; CHECK-NOT: G_BR ; ; Check basic block L1 has 2 successors: BBL1 and BBL2 -; CHECK: [[BB_L1]] (address-taken): +; CHECK: [[BB_L1]].{{[a-zA-Z0-9.]+}} (address-taken): ; CHECK-NEXT: successors: %[[BB_L1]](0x40000000), -; CHECK: %[[BB_L2:bb.[0-9]+.L2]](0x40000000) +; CHECK: %[[BB_L2:bb.[0-9]+]](0x40000000) ; CHECK: G_BRINDIRECT %{{[0-9]+}}(p0) ; ; Check basic block L2 is the return basic block -; CHECK: [[BB_L2]] (address-taken): +; CHECK: [[BB_L2]].{{[a-zA-Z0-9.]+}} (address-taken): ; CHECK-NEXT: RET_ReallyLR @indirectbr.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@indirectbr, %L1), i8* blockaddress(@indirectbr, %L2), i8* null], align 8 @@ -410,11 +410,11 @@ define i64* @trivial_bitcast(i8* %a) { ; CHECK-LABEL: name: trivial_bitcast_with_copy ; CHECK: [[A:%[0-9]+]]:_(p0) = COPY %x0 -; CHECK: G_BR %[[CAST:bb\.[0-9]+.cast]] +; CHECK: G_BR %[[CAST:bb\.[0-9]+]] -; CHECK: [[END:bb\.[0-9]+.end]]: +; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}: -; CHECK: [[CAST]]: +; CHECK: [[CAST]].{{[a-zA-Z0-9.]+}}: ; CHECK: {{%[0-9]+}}:_(p0) = COPY [[A]] ; CHECK: G_BR %[[END]] define i64* @trivial_bitcast_with_copy(i8* %a) { @@ -512,13 +512,13 @@ define void @intrinsics(i32 %cur, i32 %bits) { } ; CHECK-LABEL: name: test_phi -; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+.true]] -; CHECK: G_BR %[[FALSE:bb\.[0-9]+.false]] +; CHECK: G_BRCOND {{%.*}}, %[[TRUE:bb\.[0-9]+]] +; CHECK: G_BR %[[FALSE:bb\.[0-9]+]] -; CHECK: [[TRUE]]: +; CHECK: [[TRUE]].{{[a-zA-Z0-9.]+}}: ; CHECK: [[RES1:%[0-9]+]]:_(s32) = G_LOAD -; CHECK: [[FALSE]]: +; CHECK: [[FALSE]].{{[a-zA-Z0-9.]+}}: ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_LOAD ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]] @@ -554,7 +554,7 @@ define void @unreachable(i32 %a) { ; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 -; CHECK: {{bb.[0-9]+}}.next: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]] ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]] ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]] @@ -1226,7 +1226,7 @@ define i8* @test_const_placement() { ; CHECK: bb.{{[0-9]+}} (%ir-block.{{[0-9]+}}): ; CHECK: [[VAL_INT:%[0-9]+]]:_(s32) = G_CONSTANT i32 42 ; CHECK: [[VAL:%[0-9]+]]:_(p0) = G_INTTOPTR [[VAL_INT]](s32) -; CHECK: {{bb.[0-9]+}}.next: +; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}: br label %next next: diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll index 0e7fbd32c6f..827fdd26108 100644 --- a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll +++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll @@ -9,7 +9,7 @@ declare i32 @llvm.eh.typeid.for(i8*) ; CHECK-LABEL: name: bar ; CHECK: body: ; CHECK-NEXT: bb.1 (%ir-block.0): -; CHECK: successors: %[[GOOD:bb.[0-9]+.continue]]{{.*}}%[[BAD:bb.[0-9]+.broken]] +; CHECK: successors: %[[GOOD:bb.[0-9]+]]{{.*}}%[[BAD:bb.[0-9]+]] ; CHECK: EH_LABEL ; CHECK: %w0 = COPY ; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0 @@ -17,7 +17,7 @@ declare i32 @llvm.eh.typeid.for(i8*) ; CHECK: EH_LABEL ; CHECK: G_BR %[[GOOD]] -; CHECK: [[BAD]] (landing-pad): +; CHECK: [[BAD]].{{[a-z]+}} (landing-pad): ; CHECK: 
EH_LABEL ; CHECK: [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF ; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0 @@ -30,7 +30,7 @@ declare i32 @llvm.eh.typeid.for(i8*) ; CHECK: %x0 = COPY [[PTR_RET]] ; CHECK: %w1 = COPY [[SEL_RET]] -; CHECK: [[GOOD]]: +; CHECK: [[GOOD]].{{[a-z]+}}: ; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: {{%[0-9]+}}:_(s128) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 64 diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll index da40b274aa6..01f955bc1d1 100644 --- a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll +++ b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll @@ -10,9 +10,9 @@ declare void @_Unwind_Resume(i8*) ; CHECK: name: bar ; CHECK: body: ; CHECK-NEXT: bb.1 (%ir-block.0): -; CHECK: successors: %{{bb.[0-9]+.continue.*}}%[[LP:bb.[0-9]+.cleanup]] +; CHECK: successors: %{{bb.[0-9]+.*}}%[[LP:bb.[0-9]+]] -; CHECK: [[LP]] (landing-pad): +; CHECK: [[LP]].{{[a-z]+}} (landing-pad): ; CHECK: EH_LABEL ; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY %x0 diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir index 9c028eb9d95..a7329916ea8 100644 --- a/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir +++ b/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir @@ -43,16 +43,16 @@ registers: - { id: 16, class: _ } body: | ; CHECK-LABEL: name: test_simple - ; CHECK: bb.0.entry: - ; CHECK: successors: %bb.1.next(0x80000000) + ; CHECK: bb.0.{{[a-zA-Z0-9]+}}: + ; CHECK: successors: %bb.1(0x80000000) ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0 ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64) ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0) ; CHECK: %x0 = COPY [[PTRTOINT]](s64) - ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1.next - ; CHECK: bb.1.next: + ; CHECK: G_BRCOND [[TRUNC]](s1), %bb.1 + ; CHECK: bb.1.{{[a-zA-Z0-9]+}}: ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]] @@ -95,7 +95,7 @@ body: | %6(s64) = G_PTRTOINT %5 %x0 = COPY %6 - G_BRCOND %1, %bb.1.next + G_BRCOND %1, %bb.1 bb.1.next: diff --git a/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir b/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir index 997205bc0ef..d4ed70fa531 100644 --- a/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir +++ b/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir @@ -59,19 +59,19 @@ registers: # CHECK: %5:fpr(s32) = G_FCONSTANT float 2.000000e+00 # Second block will get the constant 1.0 when the localizer is enabled. -# CHECK: bb.1.true: +# CHECK: bb.1.{{[a-zA-Z0-9]+}}: # OPT-NOT: G_FCONSTANT # OPTNONE: [[FONE:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00 -# CHECK: G_BR %bb.3.end +# CHECK: G_BR %bb.3 # Thrid block will get the constant 2.0 when the localizer is enabled. 
-# CHECK: bb.2.false: +# CHECK: bb.2.{{[a-zA-Z0-9]+}}: # OPT-NOT: G_FCONSTANT # OPTNONE: [[FTWO:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 2.000000e+00 # CHECK: bb.3.end -# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1.true, [[FTWO]](s32), %bb.2.false -# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false +# OPTNONE: %2:fpr(s32) = PHI [[FONE]](s32), %bb.1, [[FTWO]](s32), %bb.2 +# OPT: %2:fpr(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2 # CHECK-NEXT: G_FADD %0, %2 body: | bb.0 (%ir-block.0): @@ -82,16 +82,16 @@ body: | %1(s1) = G_TRUNC %6 %4(s32) = G_FCONSTANT float 1.000000e+00 %5(s32) = G_FCONSTANT float 2.000000e+00 - G_BRCOND %1(s1), %bb.1.true - G_BR %bb.2.false + G_BRCOND %1(s1), %bb.1 + G_BR %bb.2 bb.1.true: - G_BR %bb.3.end + G_BR %bb.3 bb.2.false: bb.3.end: - %2(s32) = PHI %4(s32), %bb.1.true, %5(s32), %bb.2.false + %2(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2 %3(s32) = G_FADD %0, %2 %s0 = COPY %3(s32) RET_ReallyLR implicit %s0 diff --git a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll index 51c32b409db..eafb4126807 100644 --- a/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ b/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -508,12 +508,12 @@ block1: ; CHECK: ldr ; CHECK-NEXT: nop ; CHECK-NEXT: .Ltmp -; CHECK-NEXT: BB +; CHECK-NEXT: %bb. ; CHECK-NEXT: madd ; CHECK-NOWORKAROUND-LABEL: fall_through ; CHECK-NOWORKAROUND: ldr ; CHECK-NOWORKAROUND-NEXT: .Ltmp -; CHECK-NOWORKAROUND-NEXT: BB +; CHECK-NOWORKAROUND-NEXT: %bb. ; CHECK-NOWORKAROUND-NEXT: madd ; No checks for this, just check it doesn't crash diff --git a/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/test/CodeGen/AArch64/aarch64-stp-cluster.ll index 0ee32f79a35..c6bdbe4f032 100644 --- a/test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ b/test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=machine-scheduler -aarch64-enable-stp-suppress=false -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_i64_scale:BB#0 +; CHECK-LABEL: stp_i64_scale:%bb.0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(4): STRXui %1, %0, 1 @@ -23,7 +23,7 @@ entry: } ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_i32_scale:BB#0 +; CHECK-LABEL: stp_i32_scale:%bb.0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(4): STRWui %1, %0, 1 @@ -44,7 +44,7 @@ entry: } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_i64_unscale:BB#0 entry +; CHECK-LABEL:stp_i64_unscale:%bb.0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:SU(5): STURXi %1, %0, -32 @@ -65,7 +65,7 @@ entry: } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_i32_unscale:BB#0 entry +; CHECK-LABEL:stp_i32_unscale:%bb.0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:SU(5): STURWi %1, %0, -16 @@ -86,7 +86,7 @@ entry: } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_double:BB#0 +; CHECK-LABEL:stp_double:%bb.0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(3): STRDui %1, %0, 1 @@ -107,7 +107,7 @@ entry: } ; CHECK:********** MI Scheduling ********** -; CHECK-LABEL:stp_float:BB#0 +; CHECK-LABEL:stp_float:%bb.0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) ; CHECK:SU(3): STRSui %1, %0, 1 @@ -128,7 +128,7 @@ entry: } ; 
CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: stp_volatile:BB#0 +; CHECK-LABEL: stp_volatile:%bb.0 ; CHECK-NOT: Cluster ld/st ; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile ; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll index 932cd75052c..4f902ef4fc8 100644 --- a/test/CodeGen/AArch64/analyze-branch.ll +++ b/test/CodeGen/AArch64/analyze-branch.ll @@ -18,7 +18,7 @@ define void @test_Bcc_fallthrough_taken(i32 %in) nounwind { ; CHECK: cmp {{w[0-9]+}}, #42 ; CHECK: b.ne [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -41,7 +41,7 @@ define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind { ; CHECK: cmp {{w[0-9]+}}, #42 ; CHECK: b.eq [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -62,7 +62,7 @@ define void @test_CBZ_fallthrough_taken(i32 %in) nounwind { br i1 %tst, label %true, label %false, !prof !0 ; CHECK: cbnz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -83,7 +83,7 @@ define void @test_CBZ_fallthrough_nottaken(i64 %in) nounwind { br i1 %tst, label %true, label %false, !prof !1 ; CHECK: cbz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -104,7 +104,7 @@ define void @test_CBNZ_fallthrough_taken(i32 %in) nounwind { br i1 %tst, label %true, label %false, !prof !0 ; CHECK: cbz {{w[0-9]+}}, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -125,7 +125,7 @@ define void @test_CBNZ_fallthrough_nottaken(i64 %in) nounwind { br i1 %tst, label %true, label %false, !prof !1 ; CHECK: cbnz {{x[0-9]+}}, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -147,7 +147,7 @@ define void @test_TBZ_fallthrough_taken(i32 %in) nounwind { br i1 %tst, label %true, label %false, !prof !0 ; CHECK: tbnz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -169,7 +169,7 @@ define void @test_TBZ_fallthrough_nottaken(i64 %in) nounwind { br i1 %tst, label %true, label %false, !prof !1 ; CHECK: tbz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: @@ -192,7 +192,7 @@ define void @test_TBNZ_fallthrough_taken(i32 %in) nounwind { br i1 %tst, label %true, label %false, !prof !0 ; CHECK: tbz {{w[0-9]+}}, #15, [[FALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_true ; CHECK: [[FALSE]]: @@ -214,7 +214,7 @@ define void @test_TBNZ_fallthrough_nottaken(i64 %in) nounwind { br i1 %tst, label %true, label %false, !prof !1 ; CHECK: tbnz {{[wx][0-9]+}}, #15, [[TRUE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: // BB# +; CHECK-NEXT: // %bb. ; CHECK-NEXT: bl test_false ; CHECK: [[TRUE]]: diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll index a910585e7f5..b18e638a3a9 100644 --- a/test/CodeGen/AArch64/arm64-ccmp.ll +++ b/test/CodeGen/AArch64/arm64-ccmp.ll @@ -132,6 +132,7 @@ if.end: ; Floating point compare. ; CHECK: single_fcmp +; CHECK: ; %bb. ; CHECK: cmp ; CHECK-NOT: b. 
; CHECK: fccmp {{.*}}, #8, ge @@ -448,7 +449,7 @@ define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) { ; Test the IR CCs that expand to two cond codes. ; CHECK-LABEL: select_and_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, mi ; CHECK-NEXT: fccmp d2, d3, #1, ne @@ -463,7 +464,7 @@ define i32 @select_and_olt_one(double %v0, double %v1, double %v2, double %v3, i } ; CHECK-LABEL: select_and_one_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #1, ne ; CHECK-NEXT: fccmp d2, d3, #0, vc @@ -478,7 +479,7 @@ define i32 @select_and_one_olt(double %v0, double %v1, double %v2, double %v3, i } ; CHECK-LABEL: select_and_olt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, mi ; CHECK-NEXT: fccmp d2, d3, #8, le @@ -493,7 +494,7 @@ define i32 @select_and_olt_ueq(double %v0, double %v1, double %v2, double %v3, i } ; CHECK-LABEL: select_and_ueq_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #8, le ; CHECK-NEXT: fccmp d2, d3, #0, pl @@ -508,7 +509,7 @@ define i32 @select_and_ueq_olt(double %v0, double %v1, double %v2, double %v3, i } ; CHECK-LABEL: select_or_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, pl ; CHECK-NEXT: fccmp d2, d3, #8, le @@ -523,7 +524,7 @@ define i32 @select_or_olt_one(double %v0, double %v1, double %v2, double %v3, i3 } ; CHECK-LABEL: select_or_one_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #1, ne ; CHECK-NEXT: fccmp d2, d3, #8, vs @@ -538,7 +539,7 @@ define i32 @select_or_one_olt(double %v0, double %v1, double %v2, double %v3, i3 } ; CHECK-LABEL: select_or_olt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, pl ; CHECK-NEXT: fccmp d2, d3, #1, ne @@ -553,7 +554,7 @@ define i32 @select_or_olt_ueq(double %v0, double %v1, double %v2, double %v3, i3 } ; CHECK-LABEL: select_or_ueq_olt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d0, d1, #8, le ; CHECK-NEXT: fccmp d2, d3, #8, mi @@ -568,7 +569,7 @@ define i32 @select_or_ueq_olt(double %v0, double %v1, double %v2, double %v3, i3 } ; CHECK-LABEL: select_or_olt_ogt_ueq: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #0, pl ; CHECK-NEXT: fccmp d4, d5, #4, le @@ -586,7 +587,7 @@ define i32 @select_or_olt_ogt_ueq(double %v0, double %v1, double %v2, double %v3 } ; CHECK-LABEL: select_or_olt_ueq_ogt: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-NEXT: fcmp d0, d1 ; CHECK-NEXT: fccmp d2, d3, #4, pl ; CHECK-NEXT: fccmp d2, d3, #1, ne @@ -606,7 +607,7 @@ define i32 @select_or_olt_ueq_ogt(double %v0, double %v1, double %v2, double %v3 ; Verify that we correctly promote f16. 
; CHECK-LABEL: half_select_and_olt_oge: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0 ; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1 ; CHECK-NEXT: fcmp [[S0]], [[S1]] @@ -624,7 +625,7 @@ define i32 @half_select_and_olt_oge(half %v0, half %v1, half %v2, half %v3, i32 } ; CHECK-LABEL: half_select_and_olt_one: -; CHECK-LABEL: ; BB#0: +; CHECK-LABEL: ; %bb.0: ; CHECK-DAG: fcvt [[S0:s[0-9]+]], h0 ; CHECK-DAG: fcvt [[S1:s[0-9]+]], h1 ; CHECK-NEXT: fcmp [[S0]], [[S1]] diff --git a/test/CodeGen/AArch64/arm64-fp128.ll b/test/CodeGen/AArch64/arm64-fp128.ll index 2ae0da2d89d..3561d8fcdff 100644 --- a/test/CodeGen/AArch64/arm64-fp128.ll +++ b/test/CodeGen/AArch64/arm64-fp128.ll @@ -195,7 +195,7 @@ define i32 @test_br_cc() { iftrue: ret i32 42 -; CHECK-NEXT: BB# +; CHECK-NEXT: %bb. ; CHECK-NEXT: mov w0, #42 ; CHECK: ret iffalse: @@ -211,7 +211,7 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) { store fp128 %val, fp128* @lhs, align 16 ; CHECK: tst w0, #0x1 ; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB# +; CHECK-NEXT: %bb. ; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b ; CHECK-NEXT: [[IFFALSE]]: ; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs] diff --git a/test/CodeGen/AArch64/arm64-icmp-opt.ll b/test/CodeGen/AArch64/arm64-icmp-opt.ll index 12eae0e88fb..1ed5c5ee135 100644 --- a/test/CodeGen/AArch64/arm64-icmp-opt.ll +++ b/test/CodeGen/AArch64/arm64-icmp-opt.ll @@ -7,7 +7,7 @@ define i32 @t1(i64 %a) { ; CHECK-LABEL: t1: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: lsr x8, x0, #63 ; CHECK-NEXT: eor w0, w8, #0x1 ; CHECK-NEXT: ret diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll index cdbadfe51f0..b63e739f577 100644 --- a/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll +++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll @@ -6176,7 +6176,7 @@ define <2 x double> @test_v2f64_post_reg_ld1lane(double* %bar, double** %ptr, i6 ; Check for dependencies between the vector and the scalar load. define <4 x float> @test_v4f32_post_reg_ld1lane_dep_vec_on_load(float* %bar, float** %ptr, i64 %inc, <4 x float>* %dep_ptr_1, <4 x float>* %dep_ptr_2, <4 x float> %vec) { ; CHECK-LABEL: test_v4f32_post_reg_ld1lane_dep_vec_on_load: -; CHECK: BB#0: +; CHECK: %bb.0: ; CHECK-NEXT: ldr s[[LD:[0-9]+]], [x0] ; CHECK-NEXT: str q0, [x3] ; CHECK-NEXT: ldr q0, [x4] diff --git a/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/test/CodeGen/AArch64/arm64-ldp-cluster.ll index ca50e110a88..370db233fcb 100644 --- a/test/CodeGen/AArch64/arm64-ldp-cluster.ll +++ b/test/CodeGen/AArch64/arm64-ldp-cluster.ll @@ -4,12 +4,12 @@ ; Test ldr clustering. 
; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldr_int:BB#0 +; CHECK-LABEL: ldr_int:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(2) ; CHECK: SU(1): %{{[0-9]+}} = LDRWui ; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldr_int:BB#0 +; EXYNOS-LABEL: ldr_int:%bb.0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) ; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui @@ -24,12 +24,12 @@ define i32 @ldr_int(i32* %a) nounwind { ; Test ldpsw clustering ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_sext_int:BB#0 +; CHECK-LABEL: ldp_sext_int:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(2) ; CHECK: SU(1): %{{[0-9]+}} = LDRSWui ; CHECK: SU(2): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_sext_int:BB#0 +; EXYNOS-LABEL: ldp_sext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) ; EXYNOS: SU(1): %{{[0-9]+}} = LDRSWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRSWui @@ -45,12 +45,12 @@ define i64 @ldp_sext_int(i32* %p) nounwind { ; Test ldur clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldur_int:BB#0 +; CHECK-LABEL: ldur_int:%bb.0 ; CHECK: Cluster ld/st SU(2) - SU(1) ; CHECK: SU(1): %{{[0-9]+}} = LDURWi ; CHECK: SU(2): %{{[0-9]+}} = LDURWi ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldur_int:BB#0 +; EXYNOS-LABEL: ldur_int:%bb.0 ; EXYNOS: Cluster ld/st SU(2) - SU(1) ; EXYNOS: SU(1): %{{[0-9]+}} = LDURWi ; EXYNOS: SU(2): %{{[0-9]+}} = LDURWi @@ -65,12 +65,12 @@ define i32 @ldur_int(i32* %a) nounwind { ; Test sext + zext clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_half_sext_zext_int:BB#0 +; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0 ; CHECK: Cluster ld/st SU(3) - SU(4) ; CHECK: SU(3): %{{[0-9]+}} = LDRSWui ; CHECK: SU(4): %{{[0-9]+}}:sub_32 = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0 +; EXYNOS-LABEL: ldp_half_sext_zext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) ; EXYNOS: SU(3): %{{[0-9]+}} = LDRSWui ; EXYNOS: SU(4): %{{[0-9]+}}:sub_32 = LDRWui @@ -88,12 +88,12 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind { ; Test zext + sext clustering. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldp_half_zext_sext_int:BB#0 +; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0 ; CHECK: Cluster ld/st SU(3) - SU(4) ; CHECK: SU(3): %{{[0-9]+}}:sub_32 = LDRWui ; CHECK: SU(4): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0 +; EXYNOS-LABEL: ldp_half_zext_sext_int:%bb.0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) ; EXYNOS: SU(3): %{{[0-9]+}}:sub_32 = LDRWui ; EXYNOS: SU(4): %{{[0-9]+}} = LDRSWui @@ -111,12 +111,12 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind { ; Verify we don't cluster volatile loads. ; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldr_int_volatile:BB#0 +; CHECK-LABEL: ldr_int_volatile:%bb.0 ; CHECK-NOT: Cluster ld/st ; CHECK: SU(1): %{{[0-9]+}} = LDRWui ; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldr_int_volatile:BB#0 +; EXYNOS-LABEL: ldr_int_volatile:%bb.0 ; EXYNOS-NOT: Cluster ld/st ; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui ; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui @@ -131,12 +131,12 @@ define i32 @ldr_int_volatile(i32* %a) nounwind { ; Test ldq clustering (no clustering for Exynos). 
; CHECK: ********** MI Scheduling ********** -; CHECK-LABEL: ldq_cluster:BB#0 +; CHECK-LABEL: ldq_cluster:%bb.0 ; CHECK: Cluster ld/st SU(1) - SU(3) ; CHECK: SU(1): %{{[0-9]+}} = LDRQui ; CHECK: SU(3): %{{[0-9]+}} = LDRQui ; EXYNOS: ********** MI Scheduling ********** -; EXYNOS-LABEL: ldq_cluster:BB#0 +; EXYNOS-LABEL: ldq_cluster:%bb.0 ; EXYNOS-NOT: Cluster ld/st define <2 x i64> @ldq_cluster(i64* %p) { %a1 = bitcast i64* %p to <2 x i64>* diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll index 307d1ec1aa8..07df9cb32db 100644 --- a/test/CodeGen/AArch64/arm64-misched-basic-A53.ll +++ b/test/CodeGen/AArch64/arm64-misched-basic-A53.ll @@ -8,7 +8,7 @@ ; ; CHECK: ********** MI Scheduling ********** ; CHECK: main -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: MADDWrrr ; CHECK: ADDWri ; CHECK: ********** INTERVALS ********** @@ -83,8 +83,8 @@ for.end: ; preds = %for.cond ; after it, this test checks to make sure there are more than one. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: neon4xfloat:BB#0 -; CHECK: *** Final schedule for BB#0 *** +; CHECK: neon4xfloat:%bb.0 +; CHECK: *** Final schedule for %bb.0 *** ; CHECK: FDIVv4f32 ; CHECK: FADDv4f32 ; CHECK: FADDv4f32 @@ -130,7 +130,7 @@ declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8*) ; are otherwise ready are jammed in the pending queue. ; CHECK: ********** MI Scheduling ********** ; CHECK: testResourceConflict -; CHECK: *** Final schedule for BB#0 *** +; CHECK: *** Final schedule for %bb.0 *** ; CHECK: BRK ; CHECK: ********** INTERVALS ********** define void @testResourceConflict(float* %ptr) { @@ -178,7 +178,7 @@ declare void @llvm.trap() ; Resource contention on LDST. ; CHECK: ********** MI Scheduling ********** ; CHECK: testLdStConflict -; CHECK: *** Final schedule for BB#1 *** +; CHECK: *** Final schedule for %bb.1 *** ; CHECK: LD4Fourv2d ; CHECK: STRQui ; CHECK: ********** INTERVALS ********** diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll index 82ba18ce72c..711d2f7397b 100644 --- a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -8,10 +8,10 @@ ; ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** -; CHECK: main:BB#2 +; CHECK: main:%bb.2 ; CHECK: LDR ; CHECK: Latency : 4 -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: LDR ; CHECK: LDR ; CHECK-NOT: LDR diff --git a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll index b2bfc13967a..8c81cf43e68 100644 --- a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll +++ b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll @@ -4,7 +4,7 @@ ; Test for bug in misched memory dependency calculation. 
; ; CHECK: ********** MI Scheduling ********** -; CHECK: misched_bug:BB#0 entry +; CHECK: misched_bug:%bb.0 entry ; CHECK: SU(2): %2 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0 ; CHECK: Successors: ; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2 diff --git a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll index 375877c5179..c6c7a65e2c1 100644 --- a/test/CodeGen/AArch64/arm64-variadic-aapcs.ll +++ b/test/CodeGen/AArch64/arm64-variadic-aapcs.ll @@ -113,7 +113,7 @@ declare void @llvm.va_end(i8*) define void @test_va_end() nounwind { ; CHECK-LABEL: test_va_end: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 %addr = bitcast %va_list* @var to i8* call void @llvm.va_end(i8* %addr) diff --git a/test/CodeGen/AArch64/bics.ll b/test/CodeGen/AArch64/bics.ll index 53aa28ad913..244aacbc0df 100644 --- a/test/CodeGen/AArch64/bics.ll +++ b/test/CodeGen/AArch64/bics.ll @@ -2,7 +2,7 @@ define i1 @andn_cmp(i32 %x, i32 %y) { ; CHECK-LABEL: andn_cmp: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bics wzr, w1, w0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret @@ -15,7 +15,7 @@ define i1 @andn_cmp(i32 %x, i32 %y) { define i1 @and_cmp(i32 %x, i32 %y) { ; CHECK-LABEL: and_cmp: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bics wzr, w1, w0 ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret @@ -27,7 +27,7 @@ define i1 @and_cmp(i32 %x, i32 %y) { define i1 @and_cmp_const(i32 %x) { ; CHECK-LABEL: and_cmp_const: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #43 ; CHECK-NEXT: bics wzr, w8, w0 ; CHECK-NEXT: cset w0, eq diff --git a/test/CodeGen/AArch64/branch-relax-cbz.ll b/test/CodeGen/AArch64/branch-relax-cbz.ll index d13c0f677bc..cddecbd9bab 100644 --- a/test/CodeGen/AArch64/branch-relax-cbz.ll +++ b/test/CodeGen/AArch64/branch-relax-cbz.ll @@ -4,7 +4,7 @@ ; CHECK: cmn x{{[0-9]+}}, #5 ; CHECK-NEXT: b.le [[B2:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: ; %b3 +; CHECK-NEXT: ; %bb.1: ; %b3 ; CHECK: ldr [[LOAD:w[0-9]+]] ; CHECK: cbnz [[LOAD]], [[B8:LBB[0-9]+_[0-9]+]] ; CHECK-NEXT: b [[B7:LBB[0-9]+_[0-9]+]] diff --git a/test/CodeGen/AArch64/fast-isel-assume.ll b/test/CodeGen/AArch64/fast-isel-assume.ll index d39a907407d..50f510a09b6 100644 --- a/test/CodeGen/AArch64/fast-isel-assume.ll +++ b/test/CodeGen/AArch64/fast-isel-assume.ll @@ -3,7 +3,7 @@ ; Check that we ignore the assume intrinsic. ; CHECK-LABEL: test: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: ret define void @test(i32 %a) { %tmp0 = icmp slt i32 %a, 0 diff --git a/test/CodeGen/AArch64/fast-isel-atomic.ll b/test/CodeGen/AArch64/fast-isel-atomic.ll index 195b8befc8e..ec612616ae2 100644 --- a/test/CodeGen/AArch64/fast-isel-atomic.ll +++ b/test/CodeGen/AArch64/fast-isel-atomic.ll @@ -5,7 +5,7 @@ ; currently match, so we might as well check both! Feel free to remove SDAG. 
; CHECK-LABEL: atomic_store_monotonic_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 { @@ -14,7 +14,7 @@ define void @atomic_store_monotonic_8(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strb w1, [x0, #1] ; CHECK-NEXT: ret define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 { @@ -24,7 +24,7 @@ define void @atomic_store_monotonic_8_off(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 { @@ -33,7 +33,7 @@ define void @atomic_store_monotonic_16(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: strh w1, [x0, #2] ; CHECK-NEXT: ret define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 { @@ -43,7 +43,7 @@ define void @atomic_store_monotonic_16_off(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 { @@ -52,7 +52,7 @@ define void @atomic_store_monotonic_32(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str w1, [x0, #4] ; CHECK-NEXT: ret define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 { @@ -62,7 +62,7 @@ define void @atomic_store_monotonic_32_off(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 { @@ -71,7 +71,7 @@ define void @atomic_store_monotonic_64(i64* %p, i64 %val) #0 { } ; CHECK-LABEL: atomic_store_monotonic_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: str x1, [x0, #8] ; CHECK-NEXT: ret define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 { @@ -81,7 +81,7 @@ define void @atomic_store_monotonic_64_off(i64* %p, i64 %val) #0 { } ; CHECK-LABEL: atomic_store_release_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_8(i8* %p, i8 %val) #0 { @@ -90,7 +90,7 @@ define void @atomic_store_release_8(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_release_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #1 ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret @@ -101,7 +101,7 @@ define void @atomic_store_release_8_off(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_release_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_16(i16* %p, i16 %val) #0 { @@ -110,7 +110,7 @@ define void @atomic_store_release_16(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_release_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #2 ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret @@ -121,7 +121,7 @@ define void @atomic_store_release_16_off(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_release_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_32(i32* %p, i32 
%val) #0 { @@ -130,7 +130,7 @@ define void @atomic_store_release_32(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_release_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #4 ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret @@ -141,7 +141,7 @@ define void @atomic_store_release_32_off(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_release_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_release_64(i64* %p, i64 %val) #0 { @@ -150,7 +150,7 @@ define void @atomic_store_release_64(i64* %p, i64 %val) #0 { } ; CHECK-LABEL: atomic_store_release_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #8 ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define void @atomic_store_release_64_off(i64* %p, i64 %val) #0 { ; CHECK-LABEL: atomic_store_seq_cst_8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 { @@ -171,7 +171,7 @@ define void @atomic_store_seq_cst_8(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_8_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #1 ; CHECK-NEXT: stlrb w1, [x0] ; CHECK-NEXT: ret @@ -182,7 +182,7 @@ define void @atomic_store_seq_cst_8_off(i8* %p, i8 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 { @@ -191,7 +191,7 @@ define void @atomic_store_seq_cst_16(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_16_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #2 ; CHECK-NEXT: stlrh w1, [x0] ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define void @atomic_store_seq_cst_16_off(i16* %p, i16 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 { @@ -211,7 +211,7 @@ define void @atomic_store_seq_cst_32(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_32_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #4 ; CHECK-NEXT: stlr w1, [x0] ; CHECK-NEXT: ret @@ -222,7 +222,7 @@ define void @atomic_store_seq_cst_32_off(i32* %p, i32 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 { @@ -231,7 +231,7 @@ define void @atomic_store_seq_cst_64(i64* %p, i64 %val) #0 { } ; CHECK-LABEL: atomic_store_seq_cst_64_off: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: add x0, x0, #8 ; CHECK-NEXT: stlr x1, [x0] ; CHECK-NEXT: ret diff --git a/test/CodeGen/AArch64/fast-isel-cmp-vec.ll b/test/CodeGen/AArch64/fast-isel-cmp-vec.ll index 89b368fa19b..d5b64c5363e 100644 --- a/test/CodeGen/AArch64/fast-isel-cmp-vec.ll +++ b/test/CodeGen/AArch64/fast-isel-cmp-vec.ll @@ -8,9 +8,9 @@ define <2 x i32> @icmp_v2i32(<2 x i32> %a) { ; CHECK-LABEL: icmp_v2i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.2s [[CMP:v[0-9]+]], v0, #0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -23,9 +23,9 @@ bb2: define <2 x i32> @icmp_constfold_v2i32(<2 x i32> %a) 
{ ; CHECK-LABEL: icmp_constfold_v2i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.2s [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b v0, v[[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -38,10 +38,10 @@ bb2: define <4 x i32> @icmp_v4i32(<4 x i32> %a) { ; CHECK-LABEL: icmp_v4i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.4s [[CMP:v[0-9]+]], v0, #0 ; CHECK-NEXT: xtn.4h [[CMPV4I16:v[0-9]+]], [[CMP]] -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], [[CMPV4I16]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 @@ -55,9 +55,9 @@ bb2: define <4 x i32> @icmp_constfold_v4i32(<4 x i32> %a) { ; CHECK-LABEL: icmp_constfold_v4i32: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi d[[CMP:[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.4h [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.8b [[ZEXT:v[0-9]+]], v[[CMP]], [[MASK]] ; CHECK-NEXT: ushll.4s v0, [[ZEXT]], #0 @@ -71,9 +71,9 @@ bb2: define <16 x i8> @icmp_v16i8(<16 x i8> %a) { ; CHECK-LABEL: icmp_v16i8: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: cmeq.16b [[CMP:v[0-9]+]], v0, #0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ bb2: define <16 x i8> @icmp_constfold_v16i8(<16 x i8> %a) { ; CHECK-LABEL: icmp_constfold_v16i8: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: movi.2d [[CMP:v[0-9]+]], #0xffffffffffffffff -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: movi.16b [[MASK:v[0-9]+]], #1 ; CHECK-NEXT: and.16b v0, [[CMP]], [[MASK]] ; CHECK-NEXT: ret diff --git a/test/CodeGen/AArch64/fast-isel-cmpxchg.ll b/test/CodeGen/AArch64/fast-isel-cmpxchg.ll index 7ef625abab2..f03955c4dcd 100644 --- a/test/CodeGen/AArch64/fast-isel-cmpxchg.ll +++ b/test/CodeGen/AArch64/fast-isel-cmpxchg.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], w1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], w2, [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: @@ -25,14 +25,14 @@ define i32 @cmpxchg_monotonic_32(i32* %p, i32 %cmp, i32 %new, i32* %ps) #0 { } ; CHECK-LABEL: cmpxchg_acq_rel_32_load: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK: ldr [[NEW:w[0-9]+]], [x2] ; CHECK-NEXT: [[RETRY:.LBB[0-9_]+]]: ; CHECK-NEXT: mov [[STATUS:w[0-9]+]], #0 ; CHECK-NEXT: ldaxr [[OLD:w[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], w1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], [[NEW]], [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: @@ -57,7 +57,7 @@ define i32 @cmpxchg_acq_rel_32_load(i32* %p, i32 %cmp, i32* %pnew, i32* %ps) #0 ; CHECK-NEXT: ldaxr [[OLD:x[0-9]+]], [x0] ; CHECK-NEXT: cmp [[OLD]], x1 ; CHECK-NEXT: b.ne [[DONE:.LBB[0-9_]+]] -; CHECK-NEXT: // BB#2: +; CHECK-NEXT: // %bb.2: ; CHECK-NEXT: stlxr [[STATUS]], x2, [x0] ; CHECK-NEXT: cbnz [[STATUS]], [[RETRY]] ; CHECK-NEXT: [[DONE]]: diff --git a/test/CodeGen/AArch64/fcvt-int.ll b/test/CodeGen/AArch64/fcvt-int.ll index e52b601b145..aeafc127494 100644 --- a/test/CodeGen/AArch64/fcvt-int.ll +++ b/test/CodeGen/AArch64/fcvt-int.ll @@ -152,7 +152,7 @@ define double @test_bitcasti64todouble(i64 %in) { define double @bitcast_fabs(double %x) { ; 
CHECK-LABEL: bitcast_fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fabs d0, d0 ; CHECK-NEXT: ret ; @@ -164,7 +164,7 @@ define double @bitcast_fabs(double %x) { define float @bitcast_fneg(float %x) { ; CHECK-LABEL: bitcast_fneg: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fneg s0, s0 ; CHECK-NEXT: ret ; diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll index 6e33ab2d0be..a479572d2a3 100644 --- a/test/CodeGen/AArch64/local_vars.ll +++ b/test/CodeGen/AArch64/local_vars.ll @@ -17,7 +17,7 @@ declare void @foo() define void @trivial_func() nounwind { ; CHECK-LABEL: trivial_func: // @trivial_func -; CHECK-NEXT: // BB#0 +; CHECK-NEXT: // %bb.0 ; CHECK-NEXT: ret ret void diff --git a/test/CodeGen/AArch64/max-jump-table.ll b/test/CodeGen/AArch64/max-jump-table.ll index 070502052ff..1a7a418b31f 100644 --- a/test/CodeGen/AArch64/max-jump-table.ll +++ b/test/CodeGen/AArch64/max-jump-table.ll @@ -77,10 +77,10 @@ entry: ] ; CHECK-LABEL: function jt2: ; CHECK-NEXT: Jump Tables: -; CHECK0-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#7 BB#5 BB#6{{$}} -; CHECK4-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} -; CHECK8-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} -; CHECKM1-NEXT: jt#0: BB#1 BB#2 BB#3 BB#4{{$}} +; CHECK0-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.7 %bb.5 %bb.6{{$}} +; CHECK4-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} +; CHECK8-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} +; CHECKM1-NEXT: jt#0: %bb.1 %bb.2 %bb.3 %bb.4{{$}} ; CHEC-NEXT: Function Live Ins: bb1: tail call void @ext(i32 1) br label %return diff --git a/test/CodeGen/AArch64/neon-bitcast.ll b/test/CodeGen/AArch64/neon-bitcast.ll index 61099d48fdd..8f67ff83ae1 100644 --- a/test/CodeGen/AArch64/neon-bitcast.ll +++ b/test/CodeGen/AArch64/neon-bitcast.ll @@ -4,7 +4,7 @@ define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind { ; CHECK: test_v8i8_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <1 x i64> @@ -13,7 +13,7 @@ define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind { define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind { ; CHECK: test_v8i8_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <2 x i32> @@ -22,7 +22,7 @@ define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind { define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <2 x float> @@ -31,7 +31,7 @@ define <2 x float> @test_v8i8_to_v2f32(<8 x i8> %in) nounwind{ define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <4 x i16> @@ -40,7 +40,7 @@ define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{ define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{ ; CHECK: test_v8i8_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i8> %in to <8 x i8> @@ -51,7 +51,7 @@ define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{ define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind { ; CHECK: test_v4i16_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <1 x i64> @@ -60,7 +60,7 @@ define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind { define <2 x i32> 
@test_v4i16_to_v2i32(<4 x i16> %in) nounwind { ; CHECK: test_v4i16_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <2 x i32> @@ -69,7 +69,7 @@ define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind { define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <2 x float> @@ -78,7 +78,7 @@ define <2 x float> @test_v4i16_to_v2f32(<4 x i16> %in) nounwind{ define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <4 x i16> @@ -87,7 +87,7 @@ define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{ define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{ ; CHECK: test_v4i16_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i16> %in to <8 x i8> @@ -98,7 +98,7 @@ define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{ define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind { ; CHECK: test_v2i32_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <1 x i64> @@ -107,7 +107,7 @@ define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind { define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind { ; CHECK: test_v2i32_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <2 x i32> @@ -116,7 +116,7 @@ define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind { define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <2 x float> @@ -125,7 +125,7 @@ define <2 x float> @test_v2i32_to_v2f32(<2 x i32> %in) nounwind{ define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <4 x i16> @@ -134,7 +134,7 @@ define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{ define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{ ; CHECK: test_v2i32_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i32> %in to <8 x i8> @@ -145,7 +145,7 @@ define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{ define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind { ; CHECK: test_v2f32_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <1 x i64> @@ -154,7 +154,7 @@ define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind { define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind { ; CHECK: test_v2f32_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <2 x i32> @@ -163,7 +163,7 @@ define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind { define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <2 x float> @@ -172,7 +172,7 @@ define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{ define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v4i16: -; CHECK-NEXT: // BB#0: +; 
CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <4 x i16> @@ -181,7 +181,7 @@ define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{ define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{ ; CHECK: test_v2f32_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x float> %in to <8 x i8> @@ -192,7 +192,7 @@ define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{ define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind { ; CHECK: test_v1i64_to_v1i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <1 x i64> @@ -201,7 +201,7 @@ define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind { define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind { ; CHECK: test_v1i64_to_v2i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <2 x i32> @@ -210,7 +210,7 @@ define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind { define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <2 x float> @@ -219,7 +219,7 @@ define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{ define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v4i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <4 x i16> @@ -228,7 +228,7 @@ define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{ define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{ ; CHECK: test_v1i64_to_v8i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <1 x i64> %in to <8 x i8> @@ -240,7 +240,7 @@ define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{ define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind { ; CHECK: test_v16i8_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <2 x double> @@ -249,7 +249,7 @@ define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind { define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind { ; CHECK: test_v16i8_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <2 x i64> @@ -258,7 +258,7 @@ define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind { define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind { ; CHECK: test_v16i8_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <4 x i32> @@ -267,7 +267,7 @@ define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind { define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <4 x float> @@ -276,7 +276,7 @@ define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{ define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <8 x i16> @@ -285,7 +285,7 @@ define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{ define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{ ; CHECK: test_v16i8_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <16 x i8> %in to <16 x i8> @@ -296,7 +296,7 @@ 
define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{ define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <2 x double> @@ -305,7 +305,7 @@ define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind { define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <2 x i64> @@ -314,7 +314,7 @@ define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind { define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind { ; CHECK: test_v8i16_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <4 x i32> @@ -323,7 +323,7 @@ define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind { define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <4 x float> @@ -332,7 +332,7 @@ define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{ define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <8 x i16> @@ -341,7 +341,7 @@ define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{ define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{ ; CHECK: test_v8i16_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <8 x i16> %in to <16 x i8> @@ -352,7 +352,7 @@ define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{ define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <2 x double> @@ -361,7 +361,7 @@ define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind { define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <2 x i64> @@ -370,7 +370,7 @@ define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind { define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind { ; CHECK: test_v4i32_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <4 x i32> @@ -379,7 +379,7 @@ define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind { define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v2f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <4 x float> @@ -388,7 +388,7 @@ define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{ define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <8 x i16> @@ -397,7 +397,7 @@ define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{ define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{ ; CHECK: test_v4i32_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x i32> %in to <16 x i8> @@ -408,7 +408,7 @@ define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{ define <2 x double> 
@test_v4f32_to_v2f64(<4 x float> %in) nounwind { ; CHECK: test_v4f32_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <2 x double> @@ -417,7 +417,7 @@ define <2 x double> @test_v4f32_to_v2f64(<4 x float> %in) nounwind { define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind { ; CHECK: test_v4f32_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <2 x i64> @@ -426,7 +426,7 @@ define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind { define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind { ; CHECK: test_v4f32_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <4 x i32> @@ -435,7 +435,7 @@ define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind { define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <4 x float> @@ -444,7 +444,7 @@ define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{ define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <8 x i16> @@ -453,7 +453,7 @@ define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{ define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{ ; CHECK: test_v4f32_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <4 x float> %in to <16 x i8> @@ -464,7 +464,7 @@ define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{ define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <2 x double> @@ -473,7 +473,7 @@ define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind { define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <2 x i64> @@ -482,7 +482,7 @@ define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind { define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind { ; CHECK: test_v2i64_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <4 x i32> @@ -491,7 +491,7 @@ define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind { define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <4 x float> @@ -500,7 +500,7 @@ define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{ define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <8 x i16> @@ -509,7 +509,7 @@ define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind{ define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind{ ; CHECK: test_v2i64_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x i64> %in to <16 x i8> @@ -520,7 +520,7 @@ define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind{ define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind { ; 
CHECK: test_v2f64_to_v2f64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <2 x double> @@ -529,7 +529,7 @@ define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind { define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind { ; CHECK: test_v2f64_to_v2i64: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <2 x i64> @@ -538,7 +538,7 @@ define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind { define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind { ; CHECK: test_v2f64_to_v4i32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <4 x i32> @@ -547,7 +547,7 @@ define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind { define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v4f32: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <4 x float> @@ -556,7 +556,7 @@ define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind{ define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v8i16: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <8 x i16> @@ -565,7 +565,7 @@ define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind{ define <16 x i8> @test_v2f64_to_v16i8(<2 x double> %in) nounwind{ ; CHECK: test_v2f64_to_v16i8: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ret %val = bitcast <2 x double> %in to <16 x i8> diff --git a/test/CodeGen/AArch64/nest-register.ll b/test/CodeGen/AArch64/nest-register.ll index cc42913e10a..b8651714be3 100644 --- a/test/CodeGen/AArch64/nest-register.ll +++ b/test/CodeGen/AArch64/nest-register.ll @@ -5,7 +5,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK-NEXT: // BB#0: +; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: mov x0, x18 ; CHECK-NEXT: ret diff --git a/test/CodeGen/AArch64/recp-fastmath.ll b/test/CodeGen/AArch64/recp-fastmath.ll index 4776931cf06..9f00621eff6 100644 --- a/test/CodeGen/AArch64/recp-fastmath.ll +++ b/test/CodeGen/AArch64/recp-fastmath.ll @@ -5,7 +5,7 @@ define float @frecp0(float %x) #0 { ret float %div ; CHECK-LABEL: frecp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -15,7 +15,7 @@ define float @frecp1(float %x) #1 { ret float %div ; CHECK-LABEL: frecp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:s[0-7]]] ; CHECK-NEXT: frecps {{s[0-7](, s[0-7])?}}, [[R]] ; CHECK: frecps {{s[0-7]}}, {{s[0-7]}}, {{s[0-7]}} @@ -27,7 +27,7 @@ define <2 x float> @f2recp0(<2 x float> %x) #0 { ret <2 x float> %div ; CHECK-LABEL: f2recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -37,7 +37,7 @@ define <2 x float> @f2recp1(<2 x float> %x) #1 { ret <2 x float> %div ; CHECK-LABEL: f2recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2s]] ; CHECK-NEXT: frecps {{v[0-7]\.2s(, v[0-7].2s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2s}}, {{v[0-7]\.2s}}, {{v[0-7]\.2s}} @@ -49,7 +49,7 @@ define <4 x float> @f4recp0(<4 x float> %x) #0 { ret <4 x float> %div ; CHECK-LABEL: f4recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -59,7 +59,7 @@ define <4 x float> @f4recp1(<4 x float> %x) #1 { ret <4 x float> %div ; CHECK-LABEL: f4recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: 
frecpe [[R:v[0-7]\.4s]] ; CHECK-NEXT: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.4s}}, {{v[0-7]\.4s}}, {{v[0-7]\.4s}} @@ -71,7 +71,7 @@ define <8 x float> @f8recp0(<8 x float> %x) #0 { ret <8 x float> %div ; CHECK-LABEL: f8recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv ; CHECK-NEXT: fdiv @@ -82,7 +82,7 @@ define <8 x float> @f8recp1(<8 x float> %x) #1 { ret <8 x float> %div ; CHECK-LABEL: f8recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.4s]] ; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.4s(, v[0-7].4s)?}}, {{v[0-7]\.4s}} @@ -96,7 +96,7 @@ define double @drecp0(double %x) #0 { ret double %div ; CHECK-LABEL: drecp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -106,7 +106,7 @@ define double @drecp1(double %x) #1 { ret double %div ; CHECK-LABEL: drecp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:d[0-7]]] ; CHECK-NEXT: frecps {{d[0-7](, d[0-7])?}}, [[R]] ; CHECK: frecps {{d[0-7]}}, {{d[0-7]}}, {{d[0-7]}} @@ -119,7 +119,7 @@ define <2 x double> @d2recp0(<2 x double> %x) #0 { ret <2 x double> %div ; CHECK-LABEL: d2recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv } @@ -129,7 +129,7 @@ define <2 x double> @d2recp1(<2 x double> %x) #1 { ret <2 x double> %div ; CHECK-LABEL: d2recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]] ; CHECK-NEXT: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}} @@ -142,7 +142,7 @@ define <4 x double> @d4recp0(<4 x double> %x) #0 { ret <4 x double> %div ; CHECK-LABEL: d4recp0: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: fmov ; CHECK-NEXT: fdiv ; CHECK-NEXT: fdiv @@ -153,7 +153,7 @@ define <4 x double> @d4recp1(<4 x double> %x) #1 { ret <4 x double> %div ; CHECK-LABEL: d4recp1: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frecpe [[R:v[0-7]\.2d]] ; CHECK: frecps {{v[0-7]\.2d(, v[0-7].2d)?}}, [[R]] ; CHECK: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}} diff --git a/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/test/CodeGen/AArch64/selectcc-to-shiftand.ll index 0d89cdedfa8..99190633547 100644 --- a/test/CodeGen/AArch64/selectcc-to-shiftand.ll +++ b/test/CodeGen/AArch64/selectcc-to-shiftand.ll @@ -4,7 +4,7 @@ define i32 @neg_sel_constants(i32 %a) { ; CHECK-LABEL: neg_sel_constants: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #5 ; CHECK-NEXT: and w0, w8, w0, asr #31 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define i32 @neg_sel_constants(i32 %a) { define i32 @neg_sel_special_constant(i32 %a) { ; CHECK-LABEL: neg_sel_special_constant: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: lsr w8, w0, #22 ; CHECK-NEXT: and w0, w8, #0x200 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define i32 @neg_sel_special_constant(i32 %a) { define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) { ; CHECK-LABEL: neg_sel_variable_and_zero: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: and w0, w1, w0, asr #31 ; CHECK-NEXT: ret ; @@ -45,7 +45,7 @@ define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) { define i32 @not_pos_sel_same_variable(i32 %a) { ; CHECK-LABEL: not_pos_sel_same_variable: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: and w0, w0, w0, asr #31 ; CHECK-NEXT: ret ; @@ -60,7 +60,7 @@ define i32 @not_pos_sel_same_variable(i32 %a) { define i32 @pos_sel_constants(i32 %a) { ; CHECK-LABEL: pos_sel_constants: -; CHECK: // BB#0: +; 
CHECK: // %bb.0: ; CHECK-NEXT: mov w8, #5 ; CHECK-NEXT: bic w0, w8, w0, asr #31 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define i32 @pos_sel_constants(i32 %a) { define i32 @pos_sel_special_constant(i32 %a) { ; CHECK-LABEL: pos_sel_special_constant: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: orr w8, wzr, #0x200 ; CHECK-NEXT: bic w0, w8, w0, lsr #22 ; CHECK-NEXT: ret @@ -88,7 +88,7 @@ define i32 @pos_sel_special_constant(i32 %a) { define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) { ; CHECK-LABEL: pos_sel_variable_and_zero: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bic w0, w1, w0, asr #31 ; CHECK-NEXT: ret ; @@ -101,7 +101,7 @@ define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) { define i32 @not_neg_sel_same_variable(i32 %a) { ; CHECK-LABEL: not_neg_sel_same_variable: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: bic w0, w0, w0, asr #31 ; CHECK-NEXT: ret ; @@ -115,7 +115,7 @@ define i32 @not_neg_sel_same_variable(i32 %a) { ; ret = (x-y) > 0 ? x-y : 0 define i32 @PR31175(i32 %x, i32 %y) { ; CHECK-LABEL: PR31175: -; CHECK: // BB#0: +; CHECK: // %bb.0: ; CHECK-NEXT: sub w8, w0, w1 ; CHECK-NEXT: bic w0, w8, w8, asr #31 ; CHECK-NEXT: ret diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll index 9a44b43d14e..be59f27fa85 100644 --- a/test/CodeGen/AArch64/sibling-call.ll +++ b/test/CodeGen/AArch64/sibling-call.ll @@ -6,7 +6,7 @@ declare void @callee_stack16([8 x i32], i64, i64) define void @caller_to0_from0() nounwind { ; CHECK-LABEL: caller_to0_from0: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. tail call void @callee_stack0() ret void ; CHECK-NEXT: b callee_stack0 @@ -14,7 +14,7 @@ define void @caller_to0_from0() nounwind { define void @caller_to0_from8([8 x i32], i64) nounwind{ ; CHECK-LABEL: caller_to0_from8: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. 
tail call void @callee_stack0() ret void diff --git a/test/CodeGen/AArch64/sqrt-fastmath.ll b/test/CodeGen/AArch64/sqrt-fastmath.ll index 4dd0516faf0..ade9e3d8df3 100644 --- a/test/CodeGen/AArch64/sqrt-fastmath.ll +++ b/test/CodeGen/AArch64/sqrt-fastmath.ll @@ -14,11 +14,11 @@ define float @fsqrt(float %a) #0 { ret float %1 ; FAULT-LABEL: fsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: fsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:s[0-7]]] ; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]] @@ -32,11 +32,11 @@ define <2 x float> @f2sqrt(<2 x float> %a) #0 { ret <2 x float> %1 ; FAULT-LABEL: f2sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f2sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]] @@ -50,11 +50,11 @@ define <4 x float> @f4sqrt(<4 x float> %a) #0 { ret <4 x float> %1 ; FAULT-LABEL: f4sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f4sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -68,12 +68,12 @@ define <8 x float> @f8sqrt(<8 x float> %a) #0 { ret <8 x float> %1 ; FAULT-LABEL: f8sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f8sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -92,11 +92,11 @@ define double @dsqrt(double %a) #0 { ret double %1 ; FAULT-LABEL: dsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: dsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:d[0-7]]] ; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]] @@ -111,11 +111,11 @@ define <2 x double> @d2sqrt(<2 x double> %a) #0 { ret <2 x double> %1 ; FAULT-LABEL: d2sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d2sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -130,12 +130,12 @@ define <4 x double> @d4sqrt(<4 x double> %a) #0 { ret <4 x double> %1 ; FAULT-LABEL: d4sqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d4sqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -158,11 +158,11 @@ define float @frsqrt(float %a) #0 { ret float %2 ; FAULT-LABEL: frsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: frsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:s[0-7]]] ; CHECK-NEXT: fmul [[RB:s[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{s[0-7](, s[0-7])?}}, [[RB]] @@ -177,11 +177,11 @@ define <2 x float> @f2rsqrt(<2 x float> %a) #0 { ret <2 x float> %2 ; FAULT-LABEL: f2rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f2rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; 
CHECK-NEXT: frsqrte [[RA:v[0-7]\.2s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2s(, v[0-7]\.2s)?}}, [[RB]] @@ -196,11 +196,11 @@ define <4 x float> @f4rsqrt(<4 x float> %a) #0 { ret <4 x float> %2 ; FAULT-LABEL: f4rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f4rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -215,12 +215,12 @@ define <8 x float> @f8rsqrt(<8 x float> %a) #0 { ret <8 x float> %2 ; FAULT-LABEL: f8rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: f8rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.4s]] ; CHECK: fmul [[RB:v[0-7]\.4s]], [[RA]], [[RA]] ; CHECK: frsqrts {{v[0-7]\.4s(, v[0-7]\.4s)?}}, [[RB]] @@ -237,11 +237,11 @@ define double @drsqrt(double %a) #0 { ret double %2 ; FAULT-LABEL: drsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: drsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:d[0-7]]] ; CHECK-NEXT: fmul [[RB:d[0-7]]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{d[0-7](, d[0-7])?}}, [[RB]] @@ -257,11 +257,11 @@ define <2 x double> @d2rsqrt(<2 x double> %a) #0 { ret <2 x double> %2 ; FAULT-LABEL: d2rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d2rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK-NEXT: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK-NEXT: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] @@ -277,12 +277,12 @@ define <4 x double> @d4rsqrt(<4 x double> %a) #0 { ret <4 x double> %2 ; FAULT-LABEL: d4rsqrt: -; FAULT-NEXT: BB#0 +; FAULT-NEXT: %bb.0 ; FAULT-NEXT: fsqrt ; FAULT-NEXT: fsqrt ; CHECK-LABEL: d4rsqrt: -; CHECK-NEXT: BB#0 +; CHECK-NEXT: %bb.0 ; CHECK-NEXT: frsqrte [[RA:v[0-7]\.2d]] ; CHECK: fmul [[RB:v[0-7]\.2d]], [[RA]], [[RA]] ; CHECK: frsqrts {{v[0-7]\.2d(, v[0-7]\.2d)?}}, [[RB]] diff --git a/test/CodeGen/AArch64/tail-call.ll b/test/CodeGen/AArch64/tail-call.ll index fa5d8b943b6..ab63413bd3f 100644 --- a/test/CodeGen/AArch64/tail-call.ll +++ b/test/CodeGen/AArch64/tail-call.ll @@ -7,7 +7,7 @@ declare extern_weak fastcc void @callee_weak() define fastcc void @caller_to0_from0() nounwind { ; CHECK-LABEL: caller_to0_from0: -; CHECK-NEXT: // BB +; CHECK-NEXT: // %bb. 
tail call fastcc void @callee_stack0() ret void diff --git a/test/CodeGen/AMDGPU/branch-relaxation.ll b/test/CodeGen/AMDGPU/branch-relaxation.ll index 9edf439b586..023baf1407e 100644 --- a/test/CodeGen/AMDGPU/branch-relaxation.ll +++ b/test/CodeGen/AMDGPU/branch-relaxation.ll @@ -24,7 +24,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; GCN-NEXT: s_cbranch_scc1 [[BB3:BB[0-9]+_[0-9]+]] -; GCN-NEXT: ; BB#1: ; %bb2 +; GCN-NEXT: ; %bb.1: ; %bb2 ; GCN-NEXT: ;;#ASMSTART ; GCN-NEXT: v_nop_e64 ; GCN-NEXT: v_nop_e64 @@ -275,7 +275,7 @@ bb4: } ; GCN-LABEL: {{^}}uniform_unconditional_min_long_backward_branch: -; GCN-NEXT: ; BB#0: ; %entry +; GCN-NEXT: ; %bb.0: ; %entry ; GCN-NEXT: [[LOOP:BB[0-9]_[0-9]+]]: ; %loop ; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 @@ -311,7 +311,7 @@ loop: ; branch from %bb0 to %bb2 ; GCN-LABEL: {{^}}expand_requires_expand: -; GCN-NEXT: ; BB#0: ; %bb0 +; GCN-NEXT: ; %bb.0: ; %bb0 ; GCN: s_load_dword ; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}} ; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]] @@ -398,7 +398,7 @@ bb3: ; GCN: s_cmp_lg_u32 ; GCN: s_cbranch_scc1 [[ENDIF]] -; GCN-NEXT: ; BB#2: ; %if_uniform +; GCN-NEXT: ; %bb.2: ; %if_uniform ; GCN: buffer_store_dword ; GCN-NEXT: [[ENDIF]]: ; %endif diff --git a/test/CodeGen/AMDGPU/callee-frame-setup.ll b/test/CodeGen/AMDGPU/callee-frame-setup.ll index 9e01267150e..88d165144f9 100644 --- a/test/CodeGen/AMDGPU/callee-frame-setup.ll +++ b/test/CodeGen/AMDGPU/callee-frame-setup.ll @@ -2,7 +2,7 @@ ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=GFX9 %s ; GCN-LABEL: {{^}}callee_no_stack: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_setpc_b64 define void @callee_no_stack() #0 { @@ -10,7 +10,7 @@ define void @callee_no_stack() #0 { } ; GCN-LABEL: {{^}}callee_no_stack_no_fp_elim: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_setpc_b64 define void @callee_no_stack_no_fp_elim() #1 { @@ -20,7 +20,7 @@ define void @callee_no_stack_no_fp_elim() #1 { ; Requires frame pointer for access to local regular object. 
; GCN-LABEL: {{^}}callee_with_stack: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN-NEXT: s_mov_b32 s5, s32 ; GCN-NEXT: v_mov_b32_e32 v0, 0{{$}} @@ -34,7 +34,7 @@ define void @callee_with_stack() #0 { } ; GCN-LABEL: {{^}}callee_with_stack_and_call: -; GCN: ; BB#0: +; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt ; GCN: s_mov_b32 s5, s32 ; GCN: buffer_store_dword v32, off, s[0:3], s5 offset:8 diff --git a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll index 697f26b83a4..1e0af2611b0 100644 --- a/test/CodeGen/AMDGPU/cf-loop-on-constant.ll +++ b/test/CodeGen/AMDGPU/cf-loop-on-constant.ll @@ -102,7 +102,7 @@ for.body: ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4 ; GCN: s_cbranch_vccnz [[LOOPBB]] -; GCN-NEXT: ; BB#2 +; GCN-NEXT: ; %bb.2 ; GCN-NEXT: s_endpgm define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind { entry: diff --git a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll index 6f9c043f914..071bcbcf81b 100644 --- a/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll +++ b/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll @@ -13,7 +13,7 @@ ; VGPR: workitem_private_segment_byte_size = 12{{$}} -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] @@ -91,7 +91,7 @@ endif: ; GCN-LABEL: {{^}}divergent_loop: ; VGPR: workitem_private_segment_byte_size = 12{{$}} -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] @@ -167,7 +167,7 @@ end: } ; GCN-LABEL: {{^}}divergent_if_else_endif: -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN: s_mov_b32 m0, -1 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]] diff --git a/test/CodeGen/AMDGPU/convergent-inlineasm.ll b/test/CodeGen/AMDGPU/convergent-inlineasm.ll index 0074a41e44c..80907bf1c1b 100644 --- a/test/CodeGen/AMDGPU/convergent-inlineasm.ll +++ b/test/CodeGen/AMDGPU/convergent-inlineasm.ll @@ -2,7 +2,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0 ; GCN-LABEL: {{^}}convergent_inlineasm: -; GCN: BB#0: +; GCN: %bb.0: ; GCN: v_cmp_ne_u32_e64 ; GCN: ; mask branch ; GCN: BB{{[0-9]+_[0-9]+}}: diff --git a/test/CodeGen/AMDGPU/early-if-convert.ll b/test/CodeGen/AMDGPU/early-if-convert.ll index 792f0b1eaef..d129ca5c140 100644 --- a/test/CodeGen/AMDGPU/early-if-convert.ll +++ b/test/CodeGen/AMDGPU/early-if-convert.ll @@ -382,7 +382,7 @@ done: } ; GCN-LABEL: {{^}}ifcvt_undef_scc: -; GCN: {{^}}; BB#0: +; GCN: {{^}}; %bb.0: ; GCN-NEXT: s_load_dwordx2 ; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 1, 0 define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, i32 addrspace(1)* %out) { diff --git a/test/CodeGen/AMDGPU/else.ll b/test/CodeGen/AMDGPU/else.ll index 22338e4f50e..c73ea936e8b 100644 --- a/test/CodeGen/AMDGPU/else.ll +++ b/test/CodeGen/AMDGPU/else.ll @@ -25,7 +25,7 @@ end: } ; CHECK-LABEL: {{^}}else_execfix_leave_wqm: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: s_mov_b64 [[INIT_EXEC:s\[[0-9]+:[0-9]+\]]], exec ; CHECK: ; %Flow ; CHECK-NEXT: s_or_saveexec_b64 [[DST:s\[[0-9]+:[0-9]+\]]], diff --git a/test/CodeGen/AMDGPU/fence-amdgiz.ll b/test/CodeGen/AMDGPU/fence-amdgiz.ll index 3055f325f3f..0dd2a9241b2 100644 --- a/test/CodeGen/AMDGPU/fence-amdgiz.ll +++ b/test/CodeGen/AMDGPU/fence-amdgiz.ll @@ -3,7 +3,7 @@ target datalayout = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5" ; CHECK-LABEL: atomic_fence -; CHECK: BB#0: +; CHECK: %bb.0: ; 
CHECK-NOT: ATOMIC_FENCE ; CHECK-NEXT: s_waitcnt vmcnt(0) ; CHECK-NEXT: buffer_wbinvl1_vol diff --git a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll index f6bf0b09486..37d05c7ac41 100644 --- a/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll +++ b/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll @@ -3,7 +3,7 @@ ; SILowerI1Copies was not handling IMPLICIT_DEF ; SI-LABEL: {{^}}br_implicit_def: -; SI: BB#0: +; SI: %bb.0: ; SI-NEXT: s_cbranch_scc1 define amdgpu_kernel void @br_implicit_def(i32 addrspace(1)* %out, i32 %arg) #0 { bb: diff --git a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir index 67642282f75..61aa39fcc25 100644 --- a/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir +++ b/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir @@ -26,7 +26,7 @@ ... --- # CHECK-LABEL: name: invert_br_undef_vcc -# CHECK: S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc +# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc name: invert_br_undef_vcc alignment: 0 @@ -58,7 +58,7 @@ body: | %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`) %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCNZ %bb.2.if, implicit undef %vcc + S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -66,7 +66,7 @@ body: | %vgpr0 = V_MOV_B32_e32 100, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 1, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll index 4f8c6191224..49ca7d40572 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.ll @@ -127,7 +127,7 @@ entry: } ;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 ;CHECK: s_waitcnt @@ -151,7 +151,7 @@ main_body: } ;CHECK-LABEL: {{^}}buffer_load_x1_offen_merged_glc_slc: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4{{$}} ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:12 glc{{$}} ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 glc slc{{$}} @@ -176,7 +176,7 @@ main_body: } ;CHECK-LABEL: {{^}}buffer_load_x2_offen_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ;CHECK: s_waitcnt define amdgpu_ps void @buffer_load_x2_offen_merged(<4 x i32> inreg %rsrc, i32 %a) { @@ -194,7 +194,7 @@ main_body: } ;CHECK-LABEL: {{^}}buffer_load_x1_offset_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. ;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4 ;CHECK-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:28 ;CHECK: s_waitcnt @@ -212,7 +212,7 @@ main_body: } ;CHECK-LABEL: {{^}}buffer_load_x2_offset_merged: -;CHECK-NEXT: BB# +;CHECK-NEXT: %bb. 
;CHECK-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], off, s[0:3], 0 offset:4 ;CHECK: s_waitcnt define amdgpu_ps void @buffer_load_x2_offset_merged(<4 x i32> inreg %rsrc) { diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll index 10bea8ea63b..69de9555035 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1() #0 ; GCN-LABEL: {{^}}test_buffer_wbinvl1: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; SI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xc4,0xe1,0x00,0x00,0x00,0x00] ; VI-NEXT: buffer_wbinvl1 ; encoding: [0x00,0x00,0xf8,0xe0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll index fe60d16d90f..d1c8f37b3d8 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.sc.ll @@ -3,7 +3,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.sc() #0 ; SI-LABEL: {{^}}test_buffer_wbinvl1_sc: -; SI-NEXT: ; BB#0: +; SI-NEXT: ; %bb.0: ; SI-NEXT: buffer_wbinvl1_sc ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00] ; SI-NEXT: s_endpgm define amdgpu_kernel void @test_buffer_wbinvl1_sc() #0 { diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll index 061c1469ed4..4dc938c9b0a 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.wbinvl1.vol.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.buffer.wbinvl1.vol() #0 ; GCN-LABEL: {{^}}test_buffer_wbinvl1_vol: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; CI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xc0,0xe1,0x00,0x00,0x00,0x00] ; VI-NEXT: buffer_wbinvl1_vol ; encoding: [0x00,0x00,0xfc,0xe0,0x00,0x00,0x00,0x00] ; GCN: s_endpgm diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll index 7b1cfa18721..16d0c237007 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pkrtz.ll @@ -25,7 +25,7 @@ define amdgpu_kernel void @s_cvt_pkrtz_samereg_v2f16_f32(<2 x half> addrspace(1) ; FIXME: Folds to 0 on gfx9 ; GCN-LABEL: {{^}}s_cvt_pkrtz_undef_undef: -; GCN-NEXT: ; BB#0 +; GCN-NEXT: ; %bb.0 ; SI-NEXT: s_endpgm ; VI-NEXT: s_endpgm ; GFX9: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}} diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll index a1ecb7f750c..d6b0628956a 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll @@ -31,8 +31,8 @@ define amdgpu_ps void @vcc_implicit_def(float %arg13, float %arg14) { } ; SI-LABEL: {{^}}true: -; SI-NEXT: BB# -; SI-NEXT: BB# +; SI-NEXT: %bb. +; SI-NEXT: %bb. 
; SI-NEXT: s_endpgm define amdgpu_gs void @true() { call void @llvm.amdgcn.kill(i1 true) diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll index 224b2ed72e3..b7fb96a2d1a 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll @@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv() #0 declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; GCN-LABEL: {{^}}test_s_dcache_inv: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; SI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0xc0,0xc7] ; VI-NEXT: s_dcache_inv ; encoding: [0x00,0x00,0x80,0xc0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm @@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv() #0 { } ; GCN-LABEL: {{^}}test_s_dcache_inv_insert_wait: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; GCN: s_dcache_inv ; GCN: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_inv_insert_wait() #0 { diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll index f96d5db5794..e8a363adde7 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll @@ -5,7 +5,7 @@ declare void @llvm.amdgcn.s.dcache.inv.vol() #0 declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; GCN-LABEL: {{^}}test_s_dcache_inv_vol: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; CI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x40,0xc7] ; VI-NEXT: s_dcache_inv_vol ; encoding: [0x00,0x00,0x88,0xc0,0x00,0x00,0x00,0x00] ; GCN-NEXT: s_endpgm @@ -15,7 +15,7 @@ define amdgpu_kernel void @test_s_dcache_inv_vol() #0 { } ; GCN-LABEL: {{^}}test_s_dcache_inv_vol_insert_wait: -; GCN-NEXT: ; BB#0: +; GCN-NEXT: ; %bb.0: ; GCN-NEXT: s_dcache_inv_vol ; GCN: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_inv_vol_insert_wait() #0 { diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll index 99b65135043..254a0fae3c3 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb() #0 declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; VI-LABEL: {{^}}test_s_dcache_wb: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb ; encoding: [0x00,0x00,0x84,0xc0,0x00,0x00,0x00,0x00] ; VI-NEXT: s_endpgm define amdgpu_kernel void @test_s_dcache_wb() #0 { @@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb() #0 { } ; VI-LABEL: {{^}}test_s_dcache_wb_insert_wait: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb ; VI: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_wb_insert_wait() #0 { diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll index 844fcecdb48..929cd1c5f0b 100644 --- a/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll +++ b/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll @@ -4,7 +4,7 @@ declare void @llvm.amdgcn.s.dcache.wb.vol() #0 declare void @llvm.amdgcn.s.waitcnt(i32) #0 ; VI-LABEL: {{^}}test_s_dcache_wb_vol: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: ; VI-NEXT: s_dcache_wb_vol ; encoding: [0x00,0x00,0x8c,0xc0,0x00,0x00,0x00,0x00] ; VI-NEXT: s_endpgm define amdgpu_kernel void @test_s_dcache_wb_vol() #0 { @@ -13,7 +13,7 @@ define amdgpu_kernel void @test_s_dcache_wb_vol() #0 { } ; VI-LABEL: {{^}}test_s_dcache_wb_vol_insert_wait: -; VI-NEXT: ; BB#0: +; VI-NEXT: ; %bb.0: 
; VI-NEXT: s_dcache_wb_vol ; VI: s_waitcnt lgkmcnt(0) ; encoding define amdgpu_kernel void @test_s_dcache_wb_vol_insert_wait() #0 { diff --git a/test/CodeGen/AMDGPU/loop_break.ll b/test/CodeGen/AMDGPU/loop_break.ll index 4acd1b24795..b2641cd4d2e 100644 --- a/test/CodeGen/AMDGPU/loop_break.ll +++ b/test/CodeGen/AMDGPU/loop_break.ll @@ -31,7 +31,7 @@ ; GCN: s_and_b64 vcc, exec, vcc ; GCN-NEXT: s_cbranch_vccnz [[FLOW:BB[0-9]+_[0-9]+]] -; GCN: ; BB#2: ; %bb4 +; GCN: ; %bb.2: ; %bb4 ; GCN: buffer_load_dword ; GCN: v_cmp_ge_i32_e32 vcc, ; GCN: s_or_b64 [[MASK]], vcc, [[INITMASK]] @@ -41,7 +41,7 @@ ; GCN: s_andn2_b64 exec, exec, [[MASK]] ; GCN-NEXT: s_cbranch_execnz [[LOOP_ENTRY]] -; GCN: ; BB#4: ; %bb9 +; GCN: ; %bb.4: ; %bb9 ; GCN-NEXT: s_endpgm define amdgpu_kernel void @break_loop(i32 %arg) #0 { bb: diff --git a/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll b/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll index a563cfd0283..c8e920a1854 100644 --- a/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll +++ b/test/CodeGen/AMDGPU/memory-legalizer-atomic-fence.ll @@ -3,7 +3,7 @@ ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=GCN -check-prefix=GFX8 %s ; FUNC-LABEL: {{^}}system_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GFX6: s_waitcnt vmcnt(0){{$}} ; GFX6-NEXT: buffer_wbinvl1{{$}} @@ -17,7 +17,7 @@ entry: } ; FUNC-LABEL: {{^}}system_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GCN: s_endpgm @@ -28,7 +28,7 @@ entry: } ; FUNC-LABEL: {{^}}system_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -41,7 +41,7 @@ entry: } ; FUNC-LABEL: {{^}}system_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -54,7 +54,7 @@ entry: } ; FUNC-LABEL: {{^}}singlethread_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_acquire() { @@ -64,7 +64,7 @@ entry: } ; FUNC-LABEL: {{^}}singlethread_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_release() { @@ -74,7 +74,7 @@ entry: } ; FUNC-LABEL: {{^}}singlethread_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_acq_rel() { @@ -84,7 +84,7 @@ entry: } ; FUNC-LABEL: {{^}}singlethread_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @singlethread_seq_cst() { @@ -94,7 +94,7 @@ entry: } ; FUNC-LABEL: {{^}}agent_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GFX6: s_waitcnt vmcnt(0){{$}} ; GFX6-NEXT: buffer_wbinvl1{{$}} @@ -108,7 +108,7 @@ entry: } ; FUNC-LABEL: {{^}}agent_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GCN: s_endpgm @@ -119,7 +119,7 @@ entry: } ; FUNC-LABEL: {{^}}agent_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -132,7 +132,7 @@ entry: } ; FUNC-LABEL: {{^}}agent_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_waitcnt vmcnt(0){{$}} ; GFX6: buffer_wbinvl1{{$}} @@ -145,7 +145,7 @@ entry: } ; FUNC-LABEL: {{^}}workgroup_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_acquire() { @@ -155,7 +155,7 @@ entry: } ; FUNC-LABEL: 
{{^}}workgroup_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_release() { @@ -165,7 +165,7 @@ entry: } ; FUNC-LABEL: {{^}}workgroup_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_acq_rel() { @@ -175,7 +175,7 @@ entry: } ; FUNC-LABEL: {{^}}workgroup_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @workgroup_seq_cst() { @@ -185,7 +185,7 @@ entry: } ; FUNC-LABEL: {{^}}wavefront_acquire -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_acquire() { @@ -195,7 +195,7 @@ entry: } ; FUNC-LABEL: {{^}}wavefront_release -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_release() { @@ -205,7 +205,7 @@ entry: } ; FUNC-LABEL: {{^}}wavefront_acq_rel -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_acq_rel() { @@ -215,7 +215,7 @@ entry: } ; FUNC-LABEL: {{^}}wavefront_seq_cst -; GCN: BB#0 +; GCN: %bb.0 ; GCN-NOT: ATOMIC_FENCE ; GCN: s_endpgm define amdgpu_kernel void @wavefront_seq_cst() { diff --git a/test/CodeGen/AMDGPU/multilevel-break.ll b/test/CodeGen/AMDGPU/multilevel-break.ll index 15de689b953..8cc02d49709 100644 --- a/test/CodeGen/AMDGPU/multilevel-break.ll +++ b/test/CodeGen/AMDGPU/multilevel-break.ll @@ -34,7 +34,7 @@ ; GCN-NEXT: s_andn2_b64 exec, exec, [[OR_BREAK]] ; GCN-NEXT: s_cbranch_execnz [[INNER_LOOP]] -; GCN: ; BB#{{[0-9]+}}: ; %Flow1{{$}} +; GCN: ; %bb.{{[0-9]+}}: ; %Flow1{{$}} ; GCN-NEXT: ; in Loop: Header=[[OUTER_LOOP]] Depth=1 ; Ensure copy is eliminated diff --git a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir index b5dc9d9dac8..24e8ed8e29c 100644 --- a/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir +++ b/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir @@ -184,8 +184,8 @@ body: | %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -241,8 +241,8 @@ body: | %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -298,8 +298,8 @@ body: | %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -359,8 +359,8 @@ body: | BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`) %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -384,7 +384,7 @@ body: | # CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, 
implicit-def %scc # CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY %sgpr0_sgpr1 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_and_saveexec_xor_wrong_reg alignment: 0 exposesReturnsTwice: false @@ -420,8 +420,8 @@ body: | %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term %sgpr0_sgpr1 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7 @@ -443,7 +443,7 @@ body: | # CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc # CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_and_saveexec_xor_modify_copy_to_exec alignment: 0 @@ -479,8 +479,8 @@ body: | %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -540,8 +540,8 @@ body: | %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1, %sgpr2_sgpr3 @@ -565,7 +565,7 @@ body: | # CHECK: %sgpr0_sgpr1 = COPY %exec # CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_unknown_saveexec alignment: 0 @@ -599,8 +599,8 @@ body: | %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -656,8 +656,8 @@ body: | %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 @@ -680,7 +680,7 @@ body: | # CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}} # CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3 -# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec +# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec name: optimize_if_andn2_saveexec_no_commute alignment: 0 exposesReturnsTwice: false @@ -713,8 +713,8 @@ body: | %vgpr0 = V_MOV_B32_e32 4, implicit %exec %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc %exec = S_MOV_B64_term killed %sgpr2_sgpr3 - SI_MASK_BRANCH %bb.2.end, 
implicit %exec - S_BRANCH %bb.1.if + SI_MASK_BRANCH %bb.2, implicit %exec + S_BRANCH %bb.1 bb.1.if: liveins: %sgpr0_sgpr1 diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll index 7c2e28108df..f87e8cbea4f 100644 --- a/test/CodeGen/AMDGPU/ret_jump.ll +++ b/test/CodeGen/AMDGPU/ret_jump.ll @@ -57,7 +57,7 @@ ret.bb: ; preds = %else, %main_body ; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable: ; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]] -; GCN: ; BB#{{[0-9]+}}: ; %else +; GCN: ; %bb.{{[0-9]+}}: ; %else ; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc ; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]] diff --git a/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/test/CodeGen/AMDGPU/sgpr-control-flow.ll index 8e18ab5554e..575938b5a5c 100644 --- a/test/CodeGen/AMDGPU/sgpr-control-flow.ll +++ b/test/CodeGen/AMDGPU/sgpr-control-flow.ll @@ -37,7 +37,7 @@ endif: ; SI: s_cmp_lg_u32 ; SI: s_cbranch_scc0 [[IF:BB[0-9]+_[0-9]+]] -; SI: ; BB#1: ; %else +; SI: ; %bb.1: ; %else ; SI: s_load_dword [[LOAD0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xe ; SI: s_load_dword [[LOAD1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xf ; SI-NOT: add diff --git a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll index 7423a4a2753..ce85a666340 100644 --- a/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll +++ b/test/CodeGen/AMDGPU/si-lower-control-flow-unreachable-block.ll @@ -55,7 +55,7 @@ unreachable: ; GCN: s_cmp_lg_u32 ; GCN: s_cbranch_scc0 [[UNREACHABLE:BB[0-9]+_[0-9]+]] -; GCN-NEXT: BB#{{[0-9]+}}: ; %ret +; GCN-NEXT: %bb.{{[0-9]+}}: ; %ret ; GCN-NEXT: s_endpgm ; GCN: [[UNREACHABLE]]: diff --git a/test/CodeGen/AMDGPU/skip-if-dead.ll b/test/CodeGen/AMDGPU/skip-if-dead.ll index ed7e06ee4e2..9ae36b0a06c 100644 --- a/test/CodeGen/AMDGPU/skip-if-dead.ll +++ b/test/CodeGen/AMDGPU/skip-if-dead.ll @@ -1,7 +1,7 @@ ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_pos: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 { call void @llvm.AMDGPU.kill(float 0.0) @@ -9,9 +9,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 { } ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 { call void @llvm.AMDGPU.kill(float -0.0) @@ -20,11 +20,11 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 { ; FIXME: Ideally only one would be emitted ; CHECK-LABEL: {{^}}test_kill_depth_0_imm_neg_x2: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_mov_b64 exec, 0 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 { call void @llvm.AMDGPU.kill(float -0.0) @@ -33,9 +33,9 @@ define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 { } ; CHECK-LABEL: {{^}}test_kill_depth_var: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -44,11 +44,11 @@ define amdgpu_ps void @test_kill_depth_var(float %x) #0 { ; 
FIXME: Ideally only one would be emitted ; CHECK-LABEL: {{^}}test_kill_depth_var_x2_same: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -57,11 +57,11 @@ define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 { } ; CHECK-LABEL: {{^}}test_kill_depth_var_x2: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v1 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -70,12 +70,12 @@ define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 { } ; CHECK-LABEL: {{^}}test_kill_depth_var_x2_instructions: -; CHECK-NEXT: ; BB#0: +; CHECK-NEXT: ; %bb.0: ; CHECK-NEXT: v_cmpx_le_f32_e32 vcc, 0, v0 -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: s_endpgm define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 { call void @llvm.AMDGPU.kill(float %x) @@ -90,7 +90,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 { ; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0 ; CHECK: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: +; CHECK-NEXT: ; %bb.1: ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_nop_e64 ; CHECK: v_nop_e64 @@ -105,7 +105,7 @@ define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 { ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 ; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: exp null off, off, off, off done vm ; CHECK-NEXT: s_endpgm @@ -141,7 +141,7 @@ exit: ; CHECK-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0 ; CHECK-NEXT: s_cbranch_scc1 [[RETURN_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#1: ; %bb +; CHECK-NEXT: ; %bb.1: ; %bb ; CHECK: v_mov_b32_e64 v7, -1 ; CHECK: v_nop_e64 ; CHECK: v_nop_e64 @@ -157,7 +157,7 @@ exit: ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 ; CHECK-NEXT: s_cbranch_execnz [[SPLIT_BB:BB[0-9]+_[0-9]+]] -; CHECK-NEXT: ; BB#2: +; CHECK-NEXT: ; %bb.2: ; CHECK-NEXT: exp null off, off, off, off done vm ; CHECK-NEXT: s_endpgm @@ -215,7 +215,7 @@ exit: ; CHECK: v_nop_e64 ; CHECK: v_cmpx_le_f32_e32 vcc, 0, v7 -; CHECK-NEXT: ; BB#3: +; CHECK-NEXT: ; %bb.3: ; CHECK: buffer_load_dword [[LOAD:v[0-9]+]] ; CHECK: v_cmp_eq_u32_e32 vcc, 0, [[LOAD]] ; CHECK-NEXT: s_and_b64 vcc, exec, vcc @@ -309,7 +309,7 @@ end: ; CHECK: [[SKIPKILL]]: ; CHECK: v_cmp_nge_f32_e32 vcc -; CHECK-NEXT: BB#3: ; %bb5 +; CHECK-NEXT: %bb.3: ; %bb5 ; CHECK-NEXT: .Lfunc_end{{[0-9]+}} define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 { bb: @@ -335,7 +335,7 @@ bb7: ; preds = %bb4 } ; CHECK-LABEL: {{^}}if_after_kill_block: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK: s_and_saveexec_b64 ; CHECK: s_xor_b64 ; CHECK-NEXT: mask branch [[BB4:BB[0-9]+_[0-9]+]] diff --git a/test/CodeGen/AMDGPU/smrd.ll b/test/CodeGen/AMDGPU/smrd.ll index 6f4592cabee..9fd20fd67b8 100644 --- a/test/CodeGen/AMDGPU/smrd.ll +++ b/test/CodeGen/AMDGPU/smrd.ll @@ -193,7 +193,7 @@ main_body: } ; GCN-LABEL: 
{{^}}smrd_vgpr_offset_imm: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; SICIVI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ; @@ -207,7 +207,7 @@ main_body: } ; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0 ; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ; define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 { @@ -218,7 +218,7 @@ main_body: } ; GCN-LABEL: {{^}}smrd_imm_merged: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1 ; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7 ; VI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4 @@ -243,7 +243,7 @@ main_body: } ; GCN-LABEL: {{^}}smrd_vgpr_merged: -; GCN-NEXT: BB# +; GCN-NEXT: %bb. ; SICIVI-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4 ; SICIVI-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28 diff --git a/test/CodeGen/AMDGPU/uniform-cfg.ll b/test/CodeGen/AMDGPU/uniform-cfg.ll index 247b9691aff..a247d7a343f 100644 --- a/test/CodeGen/AMDGPU/uniform-cfg.ll +++ b/test/CodeGen/AMDGPU/uniform-cfg.ll @@ -401,7 +401,7 @@ exit: ; GCN: s_cmp_lt_i32 [[COND]], 1 ; GCN: s_cbranch_scc1 BB[[FNNUM:[0-9]+]]_3 -; GCN: BB#1: +; GCN: %bb.1: ; GCN-NOT: cmp ; GCN: buffer_load_dword ; GCN: buffer_store_dword diff --git a/test/CodeGen/AMDGPU/valu-i1.ll b/test/CodeGen/AMDGPU/valu-i1.ll index 3b0f003f52b..4a3937e44f3 100644 --- a/test/CodeGen/AMDGPU/valu-i1.ll +++ b/test/CodeGen/AMDGPU/valu-i1.ll @@ -192,7 +192,7 @@ exit: ; Load loop limit from buffer ; Branch to exit if uniformly not taken -; SI: ; BB#0: +; SI: ; %bb.0: ; SI: buffer_load_dword [[VBOUND:v[0-9]+]] ; SI: v_cmp_lt_i32_e32 vcc ; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc diff --git a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir index 54991d3d953..ff9826baf48 100644 --- a/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir +++ b/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir @@ -48,7 +48,7 @@ # CHECK-LABEL: name: vccz_corrupt_workaround # CHECK: %vcc = V_CMP_EQ_F32 # CHECK-NEXT: %vcc = S_MOV_B64 %vcc -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit killed %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc name: vccz_corrupt_workaround alignment: 0 @@ -82,7 +82,7 @@ body: | %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec - S_CBRANCH_VCCZ %bb.1.else, implicit killed %vcc + S_CBRANCH_VCCZ %bb.1, implicit killed %vcc bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -90,7 +90,7 @@ body: | %vgpr0 = V_MOV_B32_e32 9, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 0, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -111,7 +111,7 @@ body: | --- # CHECK-LABEL: name: vccz_corrupt_undef_vcc # CHECK: S_WAITCNT -# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2.else, implicit undef %vcc +# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc name: vccz_corrupt_undef_vcc alignment: 0 @@ -143,7 +143,7 @@ body: | %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant 
load 8 from `i64 addrspace(2)* undef`) %sgpr7 = S_MOV_B32 61440 %sgpr6 = S_MOV_B32 -1 - S_CBRANCH_VCCZ %bb.1.else, implicit undef %vcc + S_CBRANCH_VCCZ %bb.1, implicit undef %vcc bb.2.if: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 @@ -151,7 +151,7 @@ body: | %vgpr0 = V_MOV_B32_e32 9, implicit %exec BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`) %vgpr0 = V_MOV_B32_e32 0, implicit %exec - S_BRANCH %bb.3.done + S_BRANCH %bb.3 bb.1.else: liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003 diff --git a/test/CodeGen/ARM/Windows/dbzchk.ll b/test/CodeGen/ARM/Windows/dbzchk.ll index afe30b28a27..18e6e528057 100644 --- a/test/CodeGen/ARM/Windows/dbzchk.ll +++ b/test/CodeGen/ARM/Windows/dbzchk.ll @@ -32,13 +32,13 @@ return: ret i32 %2 } -; CHECK-DIV-DAG: BB#0 -; CHECK-DIV-DAG: Successors according to CFG: BB#1({{.*}}) BB#2 -; CHECK-DIV-DAG: BB#1 -; CHECK-DIV-DAG: Successors according to CFG: BB#3 -; CHECK-DIV-DAG: BB#2 -; CHECK-DIV-DAG: Successors according to CFG: BB#3 -; CHECK-DIV-DAG: BB#3 +; CHECK-DIV-DAG: %bb.0 +; CHECK-DIV-DAG: Successors according to CFG: %bb.1({{.*}}) %bb.2 +; CHECK-DIV-DAG: %bb.1 +; CHECK-DIV-DAG: Successors according to CFG: %bb.3 +; CHECK-DIV-DAG: %bb.2 +; CHECK-DIV-DAG: Successors according to CFG: %bb.3 +; CHECK-DIV-DAG: %bb.3 ; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-MOD @@ -66,13 +66,13 @@ return: ret i32 %retval.0 } -; CHECK-MOD-DAG: BB#0 -; CHECK-MOD-DAG: Successors according to CFG: BB#2({{.*}}) BB#1 -; CHECK-MOD-DAG: BB#1 -; CHECK-MOD-DAG: Successors according to CFG: BB#3 -; CHECK-MOD-DAG: BB#3 -; CHECK-MOD-DAG: Successors according to CFG: BB#2 -; CHECK-MOD-DAG: BB#2 +; CHECK-MOD-DAG: %bb.0 +; CHECK-MOD-DAG: Successors according to CFG: %bb.2({{.*}}) %bb.1 +; CHECK-MOD-DAG: %bb.1 +; CHECK-MOD-DAG: Successors according to CFG: %bb.3 +; CHECK-MOD-DAG: %bb.3 +; CHECK-MOD-DAG: Successors according to CFG: %bb.2 +; CHECK-MOD-DAG: %bb.2 ; RUN: llc -mtriple thumbv7--windows-itanium -print-machineinstrs=expand-isel-pseudos -verify-machineinstrs -filetype asm -o /dev/null %s 2>&1 | FileCheck %s -check-prefix CHECK-CFG ; RUN: llc -mtriple thumbv7--windows-itanium -verify-machineinstrs -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-CFG-ASM @@ -111,23 +111,23 @@ if.end: attributes #0 = { optsize } -; CHECK-CFG-DAG: BB#0 -; CHECK-CFG-DAG: t2Bcc -; CHECK-CFG-DAG: t2B +; CHECK-CFG-DAG: %bb.0 +; CHECK-CFG-DAG: t2Bcc %bb.2 +; CHECK-CFG-DAG: t2B %bb.1 -; CHECK-CFG-DAG: BB#1 -; CHECK-CFG-DAG: t2B +; CHECK-CFG-DAG: %bb.1 +; CHECK-CFG-DAG: t2B %bb.3 -; CHECK-CFG-DAG: BB#2 +; CHECK-CFG-DAG: %bb.2 ; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0 -; CHECK-CFG-DAG: t2Bcc +; CHECK-CFG-DAG: t2Bcc %bb.5 -; CHECK-CFG-DAG: BB#4 +; CHECK-CFG-DAG: %bb.4 -; CHECK-CFG-DAG: BB#3 +; CHECK-CFG-DAG: %bb.3 ; CHECK-CFG-DAG: tBX_RET -; CHECK-CFG-DAG: BB#5 +; CHECK-CFG-DAG: %bb.5 ; CHECK-CFG-DAG: t__brkdiv0 ; CHECK-CFG-ASM-LABEL: h: diff --git a/test/CodeGen/ARM/and-load-combine.ll b/test/CodeGen/ARM/and-load-combine.ll index f4ea7ebcf62..6f0c1235959 100644 --- a/test/CodeGen/ARM/and-load-combine.ll +++ b/test/CodeGen/ARM/and-load-combine.ll @@ -6,7 +6,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; 
ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -16,7 +16,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -26,7 +26,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldrh r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -34,13 +34,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB0_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB0_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldrh r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -61,7 +61,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -71,7 +71,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -81,7 +81,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -89,13 +89,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB1_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB1_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -117,7 +117,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_xor8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: eor r1, r1, r0 @@ -127,7 +127,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: eor r1, r1, r0 @@ -137,7 +137,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -145,13 +145,13 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB2_2 -; 
THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB2_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -172,7 +172,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_xor16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r1, [r1] @@ -183,7 +183,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_xor16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: movw r2, #65535 ; ARMEB-NEXT: ldr r1, [r1] @@ -194,7 +194,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_xor16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: eors r2, r0 @@ -202,13 +202,13 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB3_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB3_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_xor16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: eors r0, r1 @@ -229,7 +229,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: cmp_or8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -239,7 +239,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ -249,7 +249,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldrh r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -257,13 +257,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB4_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB4_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldrh r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -284,7 +284,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_or8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -294,7 +294,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ 
-304,7 +304,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -312,13 +312,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB5_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB5_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -340,7 +340,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_or8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: orr r1, r1, r0 @@ -350,7 +350,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: orr r1, r1, r0 @@ -360,7 +360,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -368,13 +368,13 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB6_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB6_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -395,7 +395,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_or16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r1, [r1] @@ -406,7 +406,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_or16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: movw r2, #65535 ; ARMEB-NEXT: ldr r1, [r1] @@ -417,7 +417,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_or16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: orrs r2, r0 @@ -425,13 +425,13 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB7_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB7_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_or16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: orrs r0, r1 @@ -452,7 +452,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, ; ARM-LABEL: 
cmp_and8_short_short: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r1, [r1] ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: and r1, r0, r1 @@ -462,7 +462,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_short_short: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r1, [r1] ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: and r1, r0, r1 @@ -472,7 +472,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_short_short: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r1, [r1] ; THUMB1-NEXT: ldrh r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -480,13 +480,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB8_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB8_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_short_short: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r1, [r1] ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -507,7 +507,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, ; ARM-LABEL: cmp_and8_short_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldrh r0, [r0] ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: and r1, r1, r0 @@ -517,7 +517,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_short_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldrh r0, [r0] ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: and r1, r1, r0 @@ -527,7 +527,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_short_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldrh r0, [r0] ; THUMB1-NEXT: ldr r2, [r1] ; THUMB1-NEXT: ands r2, r0 @@ -535,13 +535,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB9_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB9_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_short_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldrh r0, [r0] ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ands r0, r1 @@ -563,7 +563,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a, ; ARM-LABEL: cmp_and8_int_int: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: and r1, r0, r1 @@ -573,7 +573,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and8_int_int: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: and r1, r0, r1 @@ -583,7 +583,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and8_int_int: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -591,13 +591,13 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* 
nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #24 ; THUMB1-NEXT: beq .LBB10_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB10_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and8_int_int: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -618,7 +618,7 @@ entry: define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, ; ARM-LABEL: cmp_and16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: movw r2, #65535 ; ARM-NEXT: ldr r0, [r0] @@ -629,7 +629,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: cmp_and16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: movw r2, #65535 ; ARMEB-NEXT: ldr r0, [r0] @@ -640,7 +640,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: cmp_and16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r2, [r0] ; THUMB1-NEXT: ands r2, r1 @@ -648,13 +648,13 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, ; THUMB1-NEXT: movs r1, #0 ; THUMB1-NEXT: lsls r2, r2, #16 ; THUMB1-NEXT: beq .LBB11_2 -; THUMB1-NEXT: @ BB#1: @ %entry +; THUMB1-NEXT: @ %bb.1: @ %entry ; THUMB1-NEXT: mov r0, r1 ; THUMB1-NEXT: .LBB11_2: @ %entry ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: cmp_and16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: ands r0, r1 @@ -675,7 +675,7 @@ entry: define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) { ; ARM-LABEL: add_and16: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: add r1, r1, r2 ; ARM-NEXT: orr r0, r0, r1 @@ -683,7 +683,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: add_and16: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: add r1, r1, r2 ; ARMEB-NEXT: orr r0, r0, r1 @@ -691,7 +691,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: add_and16: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: adds r1, r1, r2 ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: orrs r0, r1 @@ -699,7 +699,7 @@ define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: add_and16: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: add r1, r2 ; THUMB2-NEXT: orrs r0, r1 @@ -715,7 +715,7 @@ entry: define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test1: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: mul r2, r2, r3 ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] @@ -725,7 +725,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test1: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: mul r2, r2, r3 ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] @@ -735,7 +735,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test1: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; 
THUMB1-NEXT: muls r2, r3, r2 ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r0, [r0] @@ -745,7 +745,7 @@ define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test1: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: muls r2, r3, r2 ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] @@ -765,7 +765,7 @@ entry: define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test2: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul r1, r2, r1 @@ -775,7 +775,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test2: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r1 @@ -785,7 +785,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test2: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: muls r1, r2, r1 ; THUMB1-NEXT: ldr r0, [r0] @@ -795,7 +795,7 @@ define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test2: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: muls r1, r2, r1 @@ -815,7 +815,7 @@ entry: define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) { ; ARM-LABEL: test3: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul r1, r2, r0 ; ARM-NEXT: ldrh r2, [r3] @@ -825,7 +825,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test3: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r0 ; ARMEB-NEXT: ldrh r2, [r3] @@ -835,7 +835,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test3: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r0, [r0] ; THUMB1-NEXT: muls r2, r0, r2 ; THUMB1-NEXT: ldrh r1, [r3] @@ -845,7 +845,7 @@ define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test3: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: mul r1, r2, r0 ; THUMB2-NEXT: ldrh r2, [r3] @@ -866,7 +866,7 @@ entry: define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-LABEL: test4: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: mul r2, r2, r3 ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] @@ -876,7 +876,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test4: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: mul r2, r2, r3 ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] @@ -886,7 +886,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test4: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: muls r2, r3, r2 ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: ldr r0, [r0] @@ -896,7 +896,7 @@ define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test4: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: muls r2, r3, r2 ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] @@ -916,7 
+916,7 @@ entry: define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) { ; ARM-LABEL: test5: -; ARM: @ BB#0: @ %entry +; ARM: @ %bb.0: @ %entry ; ARM-NEXT: ldr r1, [r1] ; ARM-NEXT: ldr r0, [r0] ; ARM-NEXT: mul r1, r2, r1 @@ -926,7 +926,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) { ; ARM-NEXT: bx lr ; ; ARMEB-LABEL: test5: -; ARMEB: @ BB#0: @ %entry +; ARMEB: @ %bb.0: @ %entry ; ARMEB-NEXT: ldr r1, [r1] ; ARMEB-NEXT: ldr r0, [r0] ; ARMEB-NEXT: mul r1, r2, r1 @@ -936,7 +936,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) { ; ARMEB-NEXT: bx lr ; ; THUMB1-LABEL: test5: -; THUMB1: @ BB#0: @ %entry +; THUMB1: @ %bb.0: @ %entry ; THUMB1-NEXT: ldr r1, [r1] ; THUMB1-NEXT: muls r1, r2, r1 ; THUMB1-NEXT: ldr r0, [r0] @@ -946,7 +946,7 @@ define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) { ; THUMB1-NEXT: bx lr ; ; THUMB2-LABEL: test5: -; THUMB2: @ BB#0: @ %entry +; THUMB2: @ %bb.0: @ %entry ; THUMB2-NEXT: ldr r1, [r1] ; THUMB2-NEXT: ldr r0, [r0] ; THUMB2-NEXT: muls r1, r2, r1 diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll index a24808004ef..c6ca6a624b1 100644 --- a/test/CodeGen/ARM/arm-and-tst-peephole.ll +++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll @@ -142,27 +142,27 @@ return: ; preds = %bb2, %bb, %entry define i32 @test_tst_assessment(i32 %a, i32 %b) { ; ARM-LABEL: test_tst_assessment: -; ARM: @ BB#0: +; ARM: @ %bb.0: ; ARM-NEXT: and r0, r0, #1 ; ARM-NEXT: tst r1, #1 ; ARM-NEXT: subne r0, r0, #1 ; ARM-NEXT: mov pc, lr ; ; THUMB-LABEL: test_tst_assessment: -; THUMB: @ BB#0: +; THUMB: @ %bb.0: ; THUMB-NEXT: movs r2, r0 ; THUMB-NEXT: movs r0, #1 ; THUMB-NEXT: ands r0, r2 ; THUMB-NEXT: subs r2, r0, #1 ; THUMB-NEXT: lsls r1, r1, #31 ; THUMB-NEXT: beq .LBB2_2 -; THUMB-NEXT: @ BB#1: +; THUMB-NEXT: @ %bb.1: ; THUMB-NEXT: movs r0, r2 ; THUMB-NEXT: .LBB2_2: ; THUMB-NEXT: bx lr ; ; T2-LABEL: test_tst_assessment: -; T2: @ BB#0: +; T2: @ %bb.0: ; T2-NEXT: lsls r1, r1, #31 ; T2-NEXT: and r0, r0, #1 ; T2-NEXT: it ne @@ -170,7 +170,7 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) { ; T2-NEXT: bx lr ; ; V8-LABEL: test_tst_assessment: -; V8: @ BB#0: +; V8: @ %bb.0: ; V8-NEXT: and r0, r0, #1 ; V8-NEXT: lsls r1, r1, #31 ; V8-NEXT: it ne diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll index d1575ed12e4..192ed8f8db7 100644 --- a/test/CodeGen/ARM/atomic-ops-v8.ll +++ b/test/CodeGen/ARM/atomic-ops-v8.ll @@ -1046,7 +1046,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind ; CHECK-ARM-NEXT: cmp r[[OLD]], r0 ; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. ; CHECK: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1080,7 +1080,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounw ; CHECK-ARM-NEXT: cmp r[[OLD]], r0 ; CHECK-THUMB-NEXT: cmp r[[OLD]], r[[WANTED]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. ; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1113,7 +1113,7 @@ define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind { ; function there. ; CHECK-NEXT: cmp r[[OLD]], r0 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r1 is a reasonable guess. 
; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 @@ -1152,7 +1152,7 @@ define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind { ; CHECK-ARM-BE: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_HI]], [[MISMATCH_LO]] ; CHECK-THUMB-BE: orrs{{(\.w)?}} {{(r[0-9]+, )?}}[[MISMATCH_LO]], [[MISMATCH_HI]] ; CHECK-NEXT: bne .LBB{{[0-9]+}}_4 -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; As above, r2, r3 is a reasonable guess. ; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]] ; CHECK-NEXT: cmp [[STATUS]], #0 diff --git a/test/CodeGen/ARM/bool-ext-inc.ll b/test/CodeGen/ARM/bool-ext-inc.ll index ca9c9ab079d..00a7fcdee3c 100644 --- a/test/CodeGen/ARM/bool-ext-inc.ll +++ b/test/CodeGen/ARM/bool-ext-inc.ll @@ -3,7 +3,7 @@ define i32 @sext_inc(i1 zeroext %x) { ; CHECK-LABEL: sext_inc: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: mov pc, lr %ext = sext i1 %x to i32 @@ -13,7 +13,7 @@ define i32 @sext_inc(i1 zeroext %x) { define <4 x i32> @sext_inc_vec(<4 x i1> %x) { ; CHECK-LABEL: sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov.i16 d16, #0x1 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: veor d16, d17, d16 @@ -30,7 +30,7 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) { define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmpgt_sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: mov r0, sp @@ -49,7 +49,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmpne_sext_inc_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d18, d19}, [r12] diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll index 29d97fef060..5ee07828526 100644 --- a/test/CodeGen/ARM/cmpxchg-weak.ll +++ b/test/CodeGen/ARM/cmpxchg-weak.ll @@ -5,16 +5,16 @@ define void @test_cmpxchg_weak(i32 *%addr, i32 %desired, i32 %new) { %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic %oldval = extractvalue { i32, i1 } %pair, 0 -; CHECK-NEXT: BB#0: +; CHECK-NEXT: %bb.0: ; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r0] ; CHECK-NEXT: cmp [[LOADED]], r1 ; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#1: +; CHECK-NEXT: %bb.1: ; CHECK-NEXT: dmb ish ; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r2, [r0] ; CHECK-NEXT: cmp [[SUCCESS]], #0 ; CHECK-NEXT: beq [[SUCCESSBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#2: +; CHECK-NEXT: %bb.2: ; CHECK-NEXT: str r3, [r0] ; CHECK-NEXT: bx lr ; CHECK-NEXT: [[LDFAILBB]]: @@ -37,11 +37,11 @@ define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) { %pair = cmpxchg weak i32* %addr, i32 %desired, i32 %new seq_cst monotonic %success = extractvalue { i32, i1 } %pair, 1 -; CHECK-NEXT: BB#0: +; CHECK-NEXT: %bb.0: ; CHECK-NEXT: ldrex [[LOADED:r[0-9]+]], [r1] ; CHECK-NEXT: cmp [[LOADED]], r2 ; CHECK-NEXT: bne [[LDFAILBB:LBB[0-9]+_[0-9]+]] -; CHECK-NEXT: BB#1: +; CHECK-NEXT: %bb.1: ; CHECK-NEXT: dmb ish ; CHECK-NEXT: mov r0, #0 ; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r3, [r1] diff --git a/test/CodeGen/ARM/cortex-a57-misched-alu.ll b/test/CodeGen/ARM/cortex-a57-misched-alu.ll index 2ced60fbf0d..7d50a2023ed 100644 --- a/test/CodeGen/ARM/cortex-a57-misched-alu.ll +++ b/test/CodeGen/ARM/cortex-a57-misched-alu.ll @@ -5,7 +5,7 @@ ; Check the latency for ALU shifted operand variants. 
; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; ALU, basic - 1 cyc I0/I1 ; CHECK: EORrr diff --git a/test/CodeGen/ARM/cortex-a57-misched-basic.ll b/test/CodeGen/ARM/cortex-a57-misched-basic.ll index cfbef7bd429..ad729c2ff2a 100644 --- a/test/CodeGen/ARM/cortex-a57-misched-basic.ll +++ b/test/CodeGen/ARM/cortex-a57-misched-basic.ll @@ -6,7 +6,7 @@ ; SDIV should be scheduled at the block's begin (20 cyc of independent M unit). ; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; GENERIC: LDRi12 ; GENERIC: Latency : 1 @@ -30,7 +30,7 @@ ; A57_SCHED: SUBrr ; A57_SCHED: Latency : 1 -; CHECK: ** Final schedule for BB#0 *** +; CHECK: ** Final schedule for %bb.0 *** ; GENERIC: LDRi12 ; GENERIC: SDIV ; A57_SCHED: SDIV diff --git a/test/CodeGen/ARM/cortex-a57-misched-vadd.ll b/test/CodeGen/ARM/cortex-a57-misched-vadd.ll index eb8d1c85523..cb7490856ab 100644 --- a/test/CodeGen/ARM/cortex-a57-misched-vadd.ll +++ b/test/CodeGen/ARM/cortex-a57-misched-vadd.ll @@ -1,7 +1,7 @@ ; REQUIRES: asserts ; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s -; CHECK-LABEL: addv_i32:BB#0 +; CHECK-LABEL: addv_i32:%bb.0 ; CHECK: SU(8): {{.*}} VADDv4i32 ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left @@ -13,7 +13,7 @@ define <4 x i32> @addv_i32(<4 x i32>, <4 x i32>) { ret <4 x i32> %3 } -; CHECK-LABEL: addv_f32:BB#0 +; CHECK-LABEL: addv_f32:%bb.0 ; CHECK: SU(8): {{.*}} VADDfq ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left diff --git a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll index 372b2e2f5dc..a3e07ba17b9 100644 --- a/test/CodeGen/ARM/cortex-a57-misched-vfma.ll +++ b/test/CodeGen/ARM/cortex-a57-misched-vfma.ll @@ -5,7 +5,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test1:BB#0 +; CHECK: Test1:%bb.0 ; CHECK: VMULS ; > VMULS common latency = 5 @@ -44,7 +44,7 @@ define float @Test1(float %f1, float %f2, float %f3, float %f4, float %f5, float ; ASIMD form define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test2:BB#0 +; CHECK: Test2:%bb.0 ; CHECK: VMULfd ; > VMULfd common latency = 5 @@ -82,7 +82,7 @@ define <2 x float> @Test2(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test3:BB#0 +; CHECK: Test3:%bb.0 ; CHECK: VMULS ; > VMULS common latency = 5 @@ -121,7 +121,7 @@ define float @Test3(float %f1, float %f2, float %f3, float %f4, float %f5, float ; ASIMD form define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 x float> %f4, <2 x float> %f5, <2 x float> %f6) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test4:BB#0 +; CHECK: Test4:%bb.0 ; CHECK: VMULfd ; > VMULfd common latency = 5 @@ -159,7 +159,7 @@ define <2 x float> @Test4(<2 x float> %f1, <2 x float> %f2, <2 x float> %f3, <2 define float @Test5(float %f1, float %f2, float %f3) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test5:BB#0 +; CHECK: Test5:%bb.0 ; CHECK-DEFAULT: VNMLS ; CHECK-FAST: VFNMS @@ -178,7 +178,7 @@ define float @Test5(float %f1, float %f2, float %f3) { 
define float @Test6(float %f1, float %f2, float %f3) { ; CHECK: ********** MI Scheduling ********** -; CHECK: Test6:BB#0 +; CHECK: Test6:%bb.0 ; CHECK-DEFAULT: VNMLA ; CHECK-FAST: VFNMA diff --git a/test/CodeGen/ARM/cortex-a57-misched-vsub.ll b/test/CodeGen/ARM/cortex-a57-misched-vsub.ll index c3c445d3f0e..fe14c861f8e 100644 --- a/test/CodeGen/ARM/cortex-a57-misched-vsub.ll +++ b/test/CodeGen/ARM/cortex-a57-misched-vsub.ll @@ -1,7 +1,7 @@ ; REQUIRES: asserts ; RUN: llc < %s -mtriple=armv8r-eabi -mcpu=cortex-a57 -misched-postra -enable-misched -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s -; CHECK-LABEL: subv_i32:BB#0 +; CHECK-LABEL: subv_i32:%bb.0 ; CHECK: SU(8): {{.*}} VSUBv4i32 ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left @@ -13,7 +13,7 @@ define <4 x i32> @subv_i32(<4 x i32>, <4 x i32>) { ret <4 x i32> %3 } -; CHECK-LABEL: subv_f32:BB#0 +; CHECK-LABEL: subv_f32:%bb.0 ; CHECK: SU(8): {{.*}} VSUBfq ; CHECK-NEXT: # preds left ; CHECK-NEXT: # succs left diff --git a/test/CodeGen/ARM/cortexr52-misched-basic.ll b/test/CodeGen/ARM/cortexr52-misched-basic.ll index 614157eb0e1..0edc6653a03 100644 --- a/test/CodeGen/ARM/cortexr52-misched-basic.ll +++ b/test/CodeGen/ARM/cortexr52-misched-basic.ll @@ -7,7 +7,7 @@ ; as div takes more cycles to compute than eor. ; ; CHECK: ********** MI Scheduling ********** -; CHECK: foo:BB#0 entry +; CHECK: foo:%bb.0 entry ; CHECK: EORrr ; GENERIC: Latency : 1 ; R52_SCHED: Latency : 3 @@ -17,7 +17,7 @@ ; CHECK: SDIV ; GENERIC: Latency : 0 ; R52_SCHED: Latency : 8 -; CHECK: ** Final schedule for BB#0 *** +; CHECK: ** Final schedule for %bb.0 *** ; GENERIC: EORrr ; GENERIC: SDIV ; R52_SCHED: SDIV diff --git a/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll b/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll index 8395674e880..4f6055dee62 100644 --- a/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll +++ b/test/CodeGen/ARM/crash-on-pow2-shufflevector.ll @@ -6,7 +6,7 @@ define i32 @foo(%struct.desc* %descs, i32 %num, i32 %cw) local_unnamed_addr #0 { ; CHECK-LABEL: foo: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: mov r1, #32 ; CHECK-NEXT: vld1.32 {d16, d17}, [r0], r1 ; CHECK-NEXT: vld1.32 {d18, d19}, [r0] diff --git a/test/CodeGen/ARM/deprecated-asm.s b/test/CodeGen/ARM/deprecated-asm.s index 7318e6a68c5..465da40c1c1 100644 --- a/test/CodeGen/ARM/deprecated-asm.s +++ b/test/CodeGen/ARM/deprecated-asm.s @@ -25,7 +25,7 @@ .type foo,%function foo: @ @foo .fnstart -@ BB#0: @ %entry +@ %bb.0: @ %entry mov r0, #0 bx lr stmia r4!, {r12-r14} diff --git a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll index 1c8142e5ddd..b69f121d10c 100644 --- a/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll +++ b/test/CodeGen/ARM/ifcvt-branch-weight-bug.ll @@ -21,8 +21,8 @@ entry: ; Afer if conversion, we have ; for.body -> for.cond.backedge (100%) ; -> cond.false.i (0%) -; CHECK: BB#1: derived from LLVM BB %for.body -; CHECK: Successors according to CFG: BB#2(0x80000000 / 0x80000000 = 100.00%) BB#4(0x00000001 / 0x80000000 = 0.00%) +; CHECK: %bb.1: derived from LLVM BB %for.body +; CHECK: Successors according to CFG: %bb.2(0x80000000 / 0x80000000 = 100.00%) %bb.4(0x00000001 / 0x80000000 = 0.00%) for.body: br i1 undef, label %for.cond.backedge, label %lor.lhs.false.i, !prof !1 diff --git a/test/CodeGen/ARM/ifcvt-branch-weight.ll b/test/CodeGen/ARM/ifcvt-branch-weight.ll index 5c39d63fda1..6f6f8bc1834 100644 --- a/test/CodeGen/ARM/ifcvt-branch-weight.ll +++ 
b/test/CodeGen/ARM/ifcvt-branch-weight.ll @@ -18,8 +18,8 @@ bb: %9 = icmp eq i32 %8, 0 br i1 %9, label %return, label %bb2 -; CHECK: BB#2: derived from LLVM BB %bb2 -; CHECK: Successors according to CFG: BB#4({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}50.00%) +; CHECK: %bb.2: derived from LLVM BB %bb2 +; CHECK: Successors according to CFG: %bb.4({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}50.00%) bb2: %v10 = icmp eq i32 %3, 16 diff --git a/test/CodeGen/ARM/ifcvt-iter-indbr.ll b/test/CodeGen/ARM/ifcvt-iter-indbr.ll index 73496257306..ccc6ded49f1 100644 --- a/test/CodeGen/ARM/ifcvt-iter-indbr.ll +++ b/test/CodeGen/ARM/ifcvt-iter-indbr.ll @@ -30,10 +30,10 @@ declare i8* @bar(i32, i8*, i8*) ; CHECK-NEXT: [[FOOCALL]]: ; CHECK-NEXT: bl _foo ; -; CHECK-PROB: BB#0: -; CHECK-PROB: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}50.00%) BB#3({{[0-9a-fx/= ]+}}25.00%) BB#5({{[0-9a-fx/= ]+}}25.00%) -; CHECK-PROB: BB#2: -; CHECK-PROB: Successors according to CFG: BB#3({{[0-9a-fx/= ]+}}50.00%) BB#5({{[0-9a-fx/= ]+}}50.00%) +; CHECK-PROB: %bb.0: +; CHECK-PROB: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}50.00%) %bb.3({{[0-9a-fx/= ]+}}25.00%) %bb.5({{[0-9a-fx/= ]+}}25.00%) +; CHECK-PROB: %bb.2: +; CHECK-PROB: Successors according to CFG: %bb.3({{[0-9a-fx/= ]+}}50.00%) %bb.5({{[0-9a-fx/= ]+}}50.00%) define i32 @test(i32 %a, i32 %a2, i32* %p, i32* %p2) "no-frame-pointer-elim"="true" { entry: diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll index 6d62fd31f97..6f1e18ffdfc 100644 --- a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll +++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll @@ -4,14 +4,14 @@ define void @i24_or(i24* %a) { ; LE-LABEL: i24_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldrh r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: strh r1, [r0] ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: ldrh r1, [r0] ; BE-NEXT: ldrb r2, [r0, #2] ; BE-NEXT: orr r1, r2, r1, lsl #8 @@ -28,7 +28,7 @@ define void @i24_or(i24* %a) { define void @i24_and_or(i24* %a) { ; LE-LABEL: i24_and_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldrh r1, [r0] ; LE-NEXT: mov r2, #16256 ; LE-NEXT: orr r2, r2, #49152 @@ -38,7 +38,7 @@ define void @i24_and_or(i24* %a) { ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_and_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, #128 ; BE-NEXT: strb r1, [r0, #2] ; BE-NEXT: ldrh r1, [r0] @@ -54,7 +54,7 @@ define void @i24_and_or(i24* %a) { define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { ; LE-LABEL: i24_insert_bit: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: mov r3, #255 ; LE-NEXT: ldrh r2, [r0] ; LE-NEXT: orr r3, r3, #57088 @@ -64,7 +64,7 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i24_insert_bit: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: ldrh r2, [r0] ; BE-NEXT: mov r3, #57088 ; BE-NEXT: orr r3, r3, #16711680 @@ -84,14 +84,14 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { define void @i56_or(i56* %a) { ; LE-LABEL: i56_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: str r1, [r0] ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, r0 ; BE-NEXT: ldr r12, [r0] ; BE-NEXT: ldrh r2, [r1, #4]! 
@@ -114,7 +114,7 @@ define void @i56_or(i56* %a) { define void @i56_and_or(i56* %a) { ; LE-LABEL: i56_and_or: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r1, [r0] ; LE-NEXT: orr r1, r1, #384 ; LE-NEXT: bic r1, r1, #127 @@ -122,7 +122,7 @@ define void @i56_and_or(i56* %a) { ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_and_or: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: mov r1, r0 ; BE-NEXT: ldr r12, [r0] ; BE-NEXT: ldrh r2, [r1, #4]! @@ -147,7 +147,7 @@ define void @i56_and_or(i56* %a) { define void @i56_insert_bit(i56* %a, i1 zeroext %bit) { ; LE-LABEL: i56_insert_bit: -; LE: @ BB#0: +; LE: @ %bb.0: ; LE-NEXT: ldr r2, [r0] ; LE-NEXT: bic r2, r2, #8192 ; LE-NEXT: orr r1, r2, r1, lsl #13 @@ -155,7 +155,7 @@ define void @i56_insert_bit(i56* %a, i1 zeroext %bit) { ; LE-NEXT: mov pc, lr ; ; BE-LABEL: i56_insert_bit: -; BE: @ BB#0: +; BE: @ %bb.0: ; BE-NEXT: .save {r11, lr} ; BE-NEXT: push {r11, lr} ; BE-NEXT: mov r2, r0 diff --git a/test/CodeGen/ARM/jump-table-tbh.ll b/test/CodeGen/ARM/jump-table-tbh.ll index b3ee68ea075..ab2c579e514 100644 --- a/test/CodeGen/ARM/jump-table-tbh.ll +++ b/test/CodeGen/ARM/jump-table-tbh.ll @@ -10,7 +10,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) { ; T2-LABEL: test_tbh: ; T2: [[ANCHOR:.LCPI[0-9_]+]]: ; T2: tbh [pc, r{{[0-9]+}}, lsl #1] -; T2-NEXT: @ BB#{{[0-9]+}} +; T2-NEXT: @ %bb.{{[0-9]+}} ; T2-NEXT: LJTI ; T2-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2 ; T2-NEXT: .short (.LBB0_{{[0-9]+}}-([[ANCHOR]]+4))/2 @@ -24,7 +24,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) { ; T1: lsls [[x]], [[x]], #1 ; T1: [[ANCHOR:.LCPI[0-9_]+]]: ; T1: add pc, [[x]] -; T1-NEXT: @ BB#2 +; T1-NEXT: @ %bb.2 ; T1-NEXT: .p2align 2 ; T1-NEXT: LJTI ; T1-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2 diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll index a1eec78e453..9ed1a57616c 100644 --- a/test/CodeGen/ARM/machine-licm.ll +++ b/test/CodeGen/ARM/machine-licm.ll @@ -31,7 +31,7 @@ bb.nph: ; preds = %entry ; ARM-NOT: LCPI0_1: ; ARM: .section -; THUMB: BB#1 +; THUMB: %bb.1 ; THUMB: ldr r2, LCPI0_0 ; THUMB: add r2, pc ; THUMB: ldr r{{[0-9]+}}, [r2] diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll index bc20939d0f7..ae0b127a6f8 100644 --- a/test/CodeGen/ARM/misched-copy-arm.ll +++ b/test/CodeGen/ARM/misched-copy-arm.ll @@ -4,7 +4,7 @@ ; Loop counter copies should be eliminated. ; There is also a MUL here, but we don't care where it is scheduled. ; CHECK: postinc -; CHECK: *** Final schedule for BB#2 *** +; CHECK: *** Final schedule for %bb.2 *** ; CHECK: t2LDRs ; CHECK: t2ADDrr ; CHECK: t2CMPrr @@ -32,7 +32,7 @@ for.end: ; preds = %for.body, %entry ; This case was a crasher in constrainLocalCopy. ; The problem was the t2LDR_PRE defining both the global and local lrg. 
-; CHECK-LABEL: *** Final schedule for BB#5 *** +; CHECK-LABEL: *** Final schedule for %bb.5 *** ; CHECK: %[[R4:[0-9]+]], %[[R1:[0-9]+]] = t2LDR_PRE %[[R1]] ; CHECK: %{{[0-9]+}} = COPY %[[R1]] ; CHECK: %{{[0-9]+}} = COPY %[[R4]] diff --git a/test/CodeGen/ARM/negate-i1.ll b/test/CodeGen/ARM/negate-i1.ll index 0503763e674..493b26a5a84 100644 --- a/test/CodeGen/ARM/negate-i1.ll +++ b/test/CodeGen/ARM/negate-i1.ll @@ -4,7 +4,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { ; CHECK-LABEL: select_i32_neg1_or_0: -; CHECK-NEXT: @ BB#0: +; CHECK-NEXT: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) { ; CHECK-LABEL: select_i32_neg1_or_0_zeroext: -; CHECK-NEXT: @ BB#0: +; CHECK-NEXT: @ %bb.0: ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr ; diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll index 109d09582af..4064aae65f6 100644 --- a/test/CodeGen/ARM/neon_vabs.ll +++ b/test/CodeGen/ARM/neon_vabs.ll @@ -3,7 +3,7 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind { ; CHECK-LABEL: test1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -18,7 +18,7 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind { define <4 x i32> @test2(<4 x i32> %a) nounwind { ; CHECK-LABEL: test2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -33,7 +33,7 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind { define <8 x i16> @test3(<8 x i16> %a) nounwind { ; CHECK-LABEL: test3: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s16 q8, q8 @@ -48,7 +48,7 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind { define <16 x i8> @test4(<16 x i8> %a) nounwind { ; CHECK-LABEL: test4: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s8 q8, q8 @@ -63,7 +63,7 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind { define <4 x i32> @test5(<4 x i32> %a) nounwind { ; CHECK-LABEL: test5: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 q8, q8 @@ -78,7 +78,7 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind { define <2 x i32> @test6(<2 x i32> %a) nounwind { ; CHECK-LABEL: test6: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -91,7 +91,7 @@ define <2 x i32> @test6(<2 x i32> %a) nounwind { define <2 x i32> @test7(<2 x i32> %a) nounwind { ; CHECK-LABEL: test7: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -104,7 +104,7 @@ define <2 x i32> @test7(<2 x i32> %a) nounwind { define <4 x i16> @test8(<4 x i16> %a) nounwind { ; CHECK-LABEL: test8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -117,7 +117,7 @@ define <4 x i16> @test8(<4 x i16> %a) nounwind { define <8 x i8> @test9(<8 x i8> %a) nounwind { ; CHECK-LABEL: test9: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -130,7 +130,7 @@ define <8 x i8> @test9(<8 x i8> %a) nounwind { define <2 x i32> @test10(<2 x i32> 
%a) nounwind { ; CHECK-LABEL: test10: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vabs.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -146,7 +146,7 @@ define <2 x i32> @test10(<2 x i32> %a) nounwind { define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind { ; CHECK-LABEL: test11: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u16 q8, d17, d16 @@ -163,7 +163,7 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind { } define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind { ; CHECK-LABEL: test12: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u8 q8, d17, d16 @@ -181,7 +181,7 @@ define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind { define <2 x i64> @test13(<2 x i32> %a, <2 x i32> %b) nounwind { ; CHECK-LABEL: test13: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r2, r3 ; CHECK-NEXT: vmov d17, r0, r1 ; CHECK-NEXT: vabdl.u32 q8, d17, d16 diff --git a/test/CodeGen/ARM/nest-register.ll b/test/CodeGen/ARM/nest-register.ll index 6b8c3dc47db..ac7afe0007c 100644 --- a/test/CodeGen/ARM/nest-register.ll +++ b/test/CodeGen/ARM/nest-register.ll @@ -5,7 +5,7 @@ define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r0, r12 ; CHECK-NEXT: mov pc, lr ret i8* %arg diff --git a/test/CodeGen/ARM/noopt-dmb-v7.ll b/test/CodeGen/ARM/noopt-dmb-v7.ll index 56a29c8a17e..86b27600eb4 100644 --- a/test/CodeGen/ARM/noopt-dmb-v7.ll +++ b/test/CodeGen/ARM/noopt-dmb-v7.ll @@ -9,7 +9,7 @@ entry: ret i32 0 } -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: dmb ish ; CHECK-NEXT: dmb ish ; CHECK-NEXT: dmb ish diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll index 23de9c35a5b..7cce0b08203 100644 --- a/test/CodeGen/ARM/select_const.ll +++ b/test/CodeGen/ARM/select_const.ll @@ -8,7 +8,7 @@ define i32 @select_0_or_1(i1 %cond) { ; CHECK-LABEL: select_0_or_1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: mov pc, lr @@ -18,7 +18,7 @@ define i32 @select_0_or_1(i1 %cond) { define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 0, i32 1 @@ -27,7 +27,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { define i32 @select_0_or_1_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: mov pc, lr @@ -39,7 +39,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) { define i32 @select_1_or_0(i1 %cond) { ; CHECK-LABEL: select_1_or_0: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 1, i32 0 @@ -48,7 +48,7 @@ define i32 @select_1_or_0(i1 %cond) { define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_1_or_0_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 1, i32 0 ret i32 %sel @@ -56,7 +56,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { define i32 @select_1_or_0_signext(i1 signext %cond) { ; CHECK-LABEL: select_1_or_0_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: mov 
pc, lr %sel = select i1 %cond, i32 1, i32 0 @@ -67,7 +67,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) { define i32 @select_0_or_neg1(i1 %cond) { ; CHECK-LABEL: select_0_or_neg1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: bic r0, r1, r0 ; CHECK-NEXT: rsb r0, r0, #0 @@ -78,7 +78,7 @@ define i32 @select_0_or_neg1(i1 %cond) { define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_neg1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -88,7 +88,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { define i32 @select_0_or_neg1_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_neg1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mvn r0, r0 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 0, i32 -1 @@ -97,7 +97,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) { define i32 @select_0_or_neg1_alt(i1 %cond) { ; CHECK-LABEL: select_0_or_neg1_alt: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: sub r0, r0, #1 ; CHECK-NEXT: mov pc, lr @@ -108,7 +108,7 @@ define i32 @select_0_or_neg1_alt(i1 %cond) { define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_0_or_neg1_alt_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: sub r0, r0, #1 ; CHECK-NEXT: mov pc, lr %z = zext i1 %cond to i32 @@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_alt_zeroext(i1 zeroext %cond) { define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) { ; CHECK-LABEL: select_0_or_neg1_alt_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mvn r0, r0 ; CHECK-NEXT: mov pc, lr %z = zext i1 %cond to i32 @@ -130,7 +130,7 @@ define i32 @select_0_or_neg1_alt_signext(i1 signext %cond) { define i32 @select_neg1_or_0(i1 %cond) { ; CHECK-LABEL: select_neg1_or_0: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: and r0, r0, #1 ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr @@ -140,7 +140,7 @@ define i32 @select_neg1_or_0(i1 %cond) { define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_neg1_or_0_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: rsb r0, r0, #0 ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 -1, i32 0 @@ -149,7 +149,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { define i32 @select_neg1_or_0_signext(i1 signext %cond) { ; CHECK-LABEL: select_neg1_or_0_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov pc, lr %sel = select i1 %cond, i32 -1, i32 0 ret i32 %sel @@ -159,7 +159,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) { define i32 @select_Cplus1_C(i1 %cond) { ; CHECK-LABEL: select_Cplus1_C: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #42 @@ -171,7 +171,7 @@ define i32 @select_Cplus1_C(i1 %cond) { define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_Cplus1_C_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: movne r1, #42 @@ -183,7 +183,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { define i32 @select_Cplus1_C_signext(i1 signext %cond) { ; CHECK-LABEL: select_Cplus1_C_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #41 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #42 @@ -197,7 +197,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) { define i32 @select_C_Cplus1(i1 %cond) { ; 
CHECK-LABEL: select_C_Cplus1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #41 @@ -209,7 +209,7 @@ define i32 @select_C_Cplus1(i1 %cond) { define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C_Cplus1_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: movne r1, #41 @@ -221,7 +221,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { define i32 @select_C_Cplus1_signext(i1 signext %cond) { ; CHECK-LABEL: select_C_Cplus1_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #42 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: movne r1, #41 @@ -236,7 +236,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) { define i32 @select_C1_C2(i1 %cond) { ; CHECK-LABEL: select_C1_C2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #256 @@ -249,7 +249,7 @@ define i32 @select_C1_C2(i1 %cond) { define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; CHECK-LABEL: select_C1_C2_zeroext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: orr r1, r1, #256 @@ -262,7 +262,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { define i32 @select_C1_C2_signext(i1 signext %cond) { ; CHECK-LABEL: select_C1_C2_signext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #165 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #256 @@ -278,7 +278,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) { define i64 @opaque_constant1(i1 %cond, i64 %x) { ; CHECK-LABEL: opaque_constant1: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: mov lr, #1 @@ -310,7 +310,7 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) { define i64 @opaque_constant2(i1 %cond, i64 %x) { ; CHECK-LABEL: opaque_constant2: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, #1 ; CHECK-NEXT: tst r0, #1 ; CHECK-NEXT: orr r1, r1, #65536 diff --git a/test/CodeGen/ARM/setcc-logic.ll b/test/CodeGen/ARM/setcc-logic.ll index 79bae1facb3..c48636dffa7 100644 --- a/test/CodeGen/ARM/setcc-logic.ll +++ b/test/CodeGen/ARM/setcc-logic.ll @@ -3,7 +3,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind { ; CHECK-LABEL: ne_neg1_and_ne_zero: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: add r1, r0, #1 ; CHECK-NEXT: mov r0, #0 ; CHECK-NEXT: cmp r1, #1 @@ -19,7 +19,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i32 %x) nounwind { define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; CHECK-LABEL: and_eq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r2, r2, r3 ; CHECK-NEXT: eor r0, r0, r1 ; CHECK-NEXT: orrs r0, r0, r2 @@ -34,7 +34,7 @@ define zeroext i1 @and_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; CHECK-LABEL: or_ne: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: eor r2, r2, r3 ; CHECK-NEXT: eor r0, r0, r1 ; CHECK-NEXT: orrs r0, r0, r2 @@ -48,7 +48,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind { ; CHECK-LABEL: and_eq_vec: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r11, lr} ; CHECK-NEXT: push {r11, lr} ; CHECK-NEXT: vmov d19, r2, r3 diff --git a/test/CodeGen/ARM/tail-merge-branch-weight.ll b/test/CodeGen/ARM/tail-merge-branch-weight.ll index f83f2881579..f03906b6bf5 100644 
--- a/test/CodeGen/ARM/tail-merge-branch-weight.ll +++ b/test/CodeGen/ARM/tail-merge-branch-weight.ll @@ -9,9 +9,9 @@ ; = 0.2 * 0.4 + 0.8 * 0.7 = 0.64 ; CHECK: # Machine code for function test0: -; CHECK: Successors according to CFG: BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) BB#{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%) -; CHECK: BB#{{[0-9]+}}: -; CHECK: BB#{{[0-9]+}}: +; CHECK: Successors according to CFG: %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}20.00%) %bb.{{[0-9]+}}({{[0-9a-fx/= ]+}}80.00%) +; CHECK: %bb.{{[0-9]+}}: +; CHECK: %bb.{{[0-9]+}}: ; CHECK: # End machine code for function test0. define i32 @test0(i32 %n, i32 %m, i32* nocapture %a, i32* nocapture %b) { diff --git a/test/CodeGen/ARM/taildup-branch-weight.ll b/test/CodeGen/ARM/taildup-branch-weight.ll index 6f8d245e74a..5b7ba0ae51b 100644 --- a/test/CodeGen/ARM/taildup-branch-weight.ll +++ b/test/CodeGen/ARM/taildup-branch-weight.ll @@ -3,7 +3,7 @@ ; RUN: | FileCheck %s ; CHECK: Machine code for function test0: -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}3.12%) BB#2({{[0-9a-fx/= ]+}}96.88%) +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}3.12%) %bb.2({{[0-9a-fx/= ]+}}96.88%) define void @test0(i32 %a, i32 %b, i32* %c, i32* %d) { entry: @@ -30,7 +30,7 @@ B4: !0 = !{!"branch_weights", i32 4, i32 124} ; CHECK: Machine code for function test1: -; CHECK: Successors according to CFG: BB#2(0x7c000000 / 0x80000000 = 96.88%) BB#1(0x04000000 / 0x80000000 = 3.12%) +; CHECK: Successors according to CFG: %bb.2(0x7c000000 / 0x80000000 = 96.88%) %bb.1(0x04000000 / 0x80000000 = 3.12%) @g0 = common global i32 0, align 4 diff --git a/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll index 673e04687a1..73189fe69db 100644 --- a/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll +++ b/test/CodeGen/ARM/v8m.base-jumptable_alignment.ll @@ -30,7 +30,7 @@ for.cond7.preheader.i.us.i.i: ; preds = %for.cond7.preheader unreachable for.cond14.preheader.us.i.i.i: ; preds = %for.inc459.us.i.i.i, %for.cond7.preheader.i.i.preheader.i -; CHECK: @ BB#4 +; CHECK: @ %bb.4 ; CHECK-NEXT: .p2align 2 switch i4 undef, label %func_1.exit.loopexit [ i4 0, label %for.inc459.us.i.i.i diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll index 0a7f7698fa8..2997750ccb1 100644 --- a/test/CodeGen/ARM/vbits.ll +++ b/test/CodeGen/ARM/vbits.ll @@ -3,7 +3,7 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_andi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -17,7 +17,7 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_andi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -31,7 +31,7 @@ define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_andi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -45,7 +45,7 @@ define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_andi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vand d16, d17, d16 @@ -59,7 +59,7 @@ define <1 x i64> 
@v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind { define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_andQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -74,7 +74,7 @@ define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_andQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -89,7 +89,7 @@ define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_andQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -104,7 +104,7 @@ define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_andQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vand q8, q9, q8 @@ -119,7 +119,7 @@ define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_bici8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -134,7 +134,7 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_bici16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -149,7 +149,7 @@ define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_bici32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -164,7 +164,7 @@ define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_bici64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vbic d16, d17, d16 @@ -179,7 +179,7 @@ define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind { define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_bicQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -195,7 +195,7 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_bicQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -211,7 +211,7 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_bicQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -227,7 +227,7 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x 
i32>* %B) nounwind { define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_bicQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vbic q8, q9, q8 @@ -243,7 +243,7 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_eori8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -257,7 +257,7 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_eori16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -271,7 +271,7 @@ define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_eori32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -285,7 +285,7 @@ define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_eori64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: veor d16, d17, d16 @@ -299,7 +299,7 @@ define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind { define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_eorQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -314,7 +314,7 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_eorQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -329,7 +329,7 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_eorQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -344,7 +344,7 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_eorQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: veor q8, q9, q8 @@ -359,7 +359,7 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_mvni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -371,7 +371,7 @@ define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind { define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: v_mvni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -383,7 +383,7 @@ define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind { define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: v_mvni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; 
CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -395,7 +395,7 @@ define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind { define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind { ; CHECK-LABEL: v_mvni64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vmvn d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -407,7 +407,7 @@ define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind { define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_mvnQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -420,7 +420,7 @@ define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind { define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: v_mvnQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -433,7 +433,7 @@ define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind { define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: v_mvnQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -446,7 +446,7 @@ define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind { define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind { ; CHECK-LABEL: v_mvnQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmvn q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -459,7 +459,7 @@ define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind { define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_orri8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -473,7 +473,7 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_orri16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -487,7 +487,7 @@ define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_orri32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -501,7 +501,7 @@ define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_orri64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorr d16, d17, d16 @@ -515,7 +515,7 @@ define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind { define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_orrQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -530,7 +530,7 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_orrQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -545,7 +545,7 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: 
v_orrQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -560,7 +560,7 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_orrQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorr q8, q9, q8 @@ -575,7 +575,7 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: v_orni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -590,7 +590,7 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: v_orni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -605,7 +605,7 @@ define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: v_orni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -620,7 +620,7 @@ define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind { ; CHECK-LABEL: v_orni64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vorn d16, d17, d16 @@ -635,7 +635,7 @@ define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind { define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: v_ornQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -651,7 +651,7 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: v_ornQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -667,7 +667,7 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: v_ornQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -683,7 +683,7 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { ; CHECK-LABEL: v_ornQi64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vorn q8, q9, q8 @@ -699,7 +699,7 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind { define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtsti8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtst.8 d16, d17, d16 @@ -715,7 +715,7 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtsti16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: 
vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtst.16 d16, d17, d16 @@ -731,7 +731,7 @@ define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtsti32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtst.32 d16, d17, d16 @@ -747,7 +747,7 @@ define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtstQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.8 q8, q9, q8 @@ -764,7 +764,7 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtstQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.16 q8, q9, q8 @@ -781,7 +781,7 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtstQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtst.32 q8, q9, q8 @@ -798,7 +798,7 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimm: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr.i32 d16, #0x1000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -810,7 +810,7 @@ define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind { define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_orrimmQ: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr.i32 q8, #0x1000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -823,7 +823,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind { define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimm: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vbic.i32 d16, #0xff000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -835,7 +835,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind { define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind { ; CHECK-LABEL: v_bicimmQ: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vbic.i32 q8, #0xff000000 ; CHECK-NEXT: vmov r0, r1, d16 @@ -848,7 +848,7 @@ define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind { define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) nounwind { ; CHECK-LABEL: hidden_not_v4i32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d19, r2, r3 ; CHECK-NEXT: vmov.i32 q8, #0x6 ; CHECK-NEXT: vmov d18, r0, r1 diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll index 5f470d60707..7052607bf80 100644 --- a/test/CodeGen/ARM/vcvt.ll +++ b/test/CodeGen/ARM/vcvt.ll @@ -3,7 +3,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.s32.f32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -15,7 +15,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind { define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.u32.f32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 
@@ -27,7 +27,7 @@ define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind { define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -39,7 +39,7 @@ define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind { define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.u32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -51,7 +51,7 @@ define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind { define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.s32.f32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -64,7 +64,7 @@ define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind { define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.u32.f32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -77,7 +77,7 @@ define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind { define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.s32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -90,7 +90,7 @@ define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind { define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.u32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -103,7 +103,7 @@ define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind { define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_n_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.s32.f32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -115,7 +115,7 @@ define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind { define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_n_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.u32.f32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -127,7 +127,7 @@ define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind { define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_n_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.s32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -139,7 +139,7 @@ define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind { define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vcvt_n_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.u32 d16, d16, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -156,7 +156,7 @@ declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwi define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_f32tos32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.s32.f32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -169,7 +169,7 @@ define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind { define <4 x i32> 
@vcvtQ_n_f32tou32(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_f32tou32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -182,7 +182,7 @@ define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind { define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_s32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.s32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -195,7 +195,7 @@ define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind { define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vcvtQ_n_u32tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f32.u32 q8, q8, #1 ; CHECK-NEXT: vmov r0, r1, d16 @@ -213,7 +213,7 @@ declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwi define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind { ; CHECK-LABEL: vcvt_f16tof32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vcvt.f32.f16 q8, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -226,7 +226,7 @@ define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind { define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind { ; CHECK-LABEL: vcvt_f32tof16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vcvt.f16.f32 d16, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -242,7 +242,7 @@ declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone define <4 x i16> @fix_float_to_i16(<4 x float> %in) { ; CHECK-LABEL: fix_float_to_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f32 q8, q8, #1 @@ -257,7 +257,7 @@ define <4 x i16> @fix_float_to_i16(<4 x float> %in) { define <2 x i64> @fix_float_to_i64(<2 x float> %in) { ; CHECK-LABEL: fix_float_to_i64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: .vsave {d8, d9} @@ -287,7 +287,7 @@ define <2 x i64> @fix_float_to_i64(<2 x float> %in) { define <4 x i16> @fix_double_to_i16(<4 x double> %in) { ; CHECK-LABEL: fix_double_to_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d18, r0, r1 ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] @@ -319,7 +319,7 @@ define <4 x i16> @fix_double_to_i16(<4 x double> %in) { define <2 x i64> @fix_double_to_i64(<2 x double> %in) { ; CHECK-LABEL: fix_double_to_i64: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: .vsave {d8, d9} @@ -352,7 +352,7 @@ define <2 x i64> @fix_double_to_i64(<2 x double> %in) { define i32 @multi_sint(double %c, i32* nocapture %p, i32* nocapture %q) { ; CHECK-LABEL: multi_sint: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.s32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -369,7 +369,7 @@ define i32 @multi_sint(double %c, i32* nocapture %p, i32* nocapture %q) { define i32 @multi_uint(double %c, i32* nocapture %p, i32* nocapture %q) { ; CHECK-LABEL: multi_uint: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -386,7 +386,7 @@ define i32 @multi_uint(double %c, i32* nocapture %p, i32* nocapture %q) { define void @double_to_sint_store(double %c, i32* nocapture %p) { ; CHECK-LABEL: double_to_sint_store: -; CHECK: @ BB#0: +; 
CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.s32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -398,7 +398,7 @@ define void @double_to_sint_store(double %c, i32* nocapture %p) { define void @double_to_uint_store(double %c, i32* nocapture %p) { ; CHECK-LABEL: double_to_uint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vcvt.u32.f64 s0, d16 ; CHECK-NEXT: vstr s0, [r2] @@ -410,7 +410,7 @@ define void @double_to_uint_store(double %c, i32* nocapture %p) { define void @float_to_sint_store(float %c, i32* nocapture %p) { ; CHECK-LABEL: float_to_sint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov s0, r0 ; CHECK-NEXT: vcvt.s32.f32 s0, s0 ; CHECK-NEXT: vstr s0, [r1] @@ -422,7 +422,7 @@ define void @float_to_sint_store(float %c, i32* nocapture %p) { define void @float_to_uint_store(float %c, i32* nocapture %p) { ; CHECK-LABEL: float_to_uint_store: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov s0, r0 ; CHECK-NEXT: vcvt.u32.f32 s0, s0 ; CHECK-NEXT: vstr s0, [r1] diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll index 5b524145be7..397680c5b0c 100644 --- a/test/CodeGen/ARM/vext.ll +++ b/test/CodeGen/ARM/vext.ll @@ -3,7 +3,7 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextd: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.8 d16, d17, d16, #3 @@ -17,7 +17,7 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRd: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vext.8 d16, d17, d16, #5 @@ -31,7 +31,7 @@ define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.8 q8, q9, q8, #3 @@ -46,7 +46,7 @@ define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRq: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vext.8 q8, q9, q8, #7 @@ -61,7 +61,7 @@ define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: test_vextd16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.16 d16, d17, d16, #3 @@ -75,7 +75,7 @@ define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: test_vextq32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.32 q8, q9, q8, #3 @@ -92,7 +92,7 @@ define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextd_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vext.8 d16, d17, d16, #3 @@ -106,7 +106,7 @@ define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> 
@test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: test_vextRq_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vext.8 q8, q9, q8, #7 @@ -121,7 +121,7 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind { ; CHECK-LABEL: test_vextq_undef_op2: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 q8, q8, q8, #2 @@ -135,7 +135,7 @@ entry: define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind { ; CHECK-LABEL: test_vextd_undef_op2: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 d16, d16, d16, #2 ; CHECK-NEXT: vmov r0, r1, d16 @@ -148,7 +148,7 @@ entry: define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind { ; CHECK-LABEL: test_vextq_undef_op2_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 q8, q8, q8, #2 @@ -162,7 +162,7 @@ entry: define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind { ; CHECK-LABEL: test_vextd_undef_op2_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vext.8 d16, d16, d16, #2 ; CHECK-NEXT: vmov r0, r1, d16 @@ -180,7 +180,7 @@ entry: ; Essence: a vext is used on %A and something saner than stack load/store for final result. define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_interleaved: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.16 d16, d16, d17, #3 ; CHECK-NEXT: vorr d17, d16, d16 @@ -198,7 +198,7 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; An undef in the shuffle list should still be optimizable define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0, #8] ; CHECK-NEXT: vzip.16 d17, d16 @@ -215,7 +215,7 @@ define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; Try to look for fallback to by-element inserts. define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind { ; CHECK-LABEL: test_multisource: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r1, r0 ; CHECK-NEXT: add r2, r0, #48 ; CHECK-NEXT: add r0, r0, #32 @@ -240,7 +240,7 @@ define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind { ; Again, test for fallback to by-element inserts. define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind { ; CHECK-LABEL: test_largespan: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr d18, d16, d16 ; CHECK-NEXT: vuzp.16 d18, d17 @@ -258,7 +258,7 @@ define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind { ; really important.) 
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: test_illegal: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vorr d22, d16, d16 ; CHECK-NEXT: vmov.u16 r0, d16[0] @@ -287,7 +287,7 @@ define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; Make sure this doesn't crash define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind { ; CHECK-LABEL: test_elem_mismatch: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0:128] ; CHECK-NEXT: vmov.32 r0, d16[0] ; CHECK-NEXT: vmov.32 r2, d17[0] @@ -309,7 +309,7 @@ define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16> define <4 x i32> @test_reverse_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_reverse_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vrev64.32 q9, q8 ; CHECK-NEXT: vext.32 q8, q8, q9, #2 @@ -324,7 +324,7 @@ entry: define <4 x i32> @test_dup_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_dup_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vext.32 q8, q9, q8, #2 @@ -339,7 +339,7 @@ entry: define <4 x i32> @test_zip_and_extract(<2 x i32>* %A) { ; CHECK-LABEL: test_zip_and_extract: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 ; CHECK-NEXT: vorr q10, q8, q8 diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll index 3fa93bb43f0..731bc373aaa 100644 --- a/test/CodeGen/ARM/vpadd.ll +++ b/test/CodeGen/ARM/vpadd.ll @@ -3,7 +3,7 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vpaddi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i8 d16, d17, d16 @@ -17,7 +17,7 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vpaddi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i16 d16, d17, d16 @@ -31,7 +31,7 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vpaddi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.i32 d16, d17, d16 @@ -45,7 +45,7 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vpaddf32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vpadd.f32 d16, d17, d16 @@ -65,7 +65,7 @@ declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwin define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddls8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -77,7 +77,7 @@ define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind { define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddls16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -89,7 +89,7 @@ define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind { 
define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddls32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.s32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -101,7 +101,7 @@ define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind { define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlu8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -113,7 +113,7 @@ define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind { define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddlu16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -125,7 +125,7 @@ define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind { define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlu32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vpaddl.u32 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -137,7 +137,7 @@ define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind { define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlQs8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s8 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -150,7 +150,7 @@ define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind { define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddlQs16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s16 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -163,7 +163,7 @@ define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind { define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlQs32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -176,7 +176,7 @@ define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind { define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind { ; CHECK-LABEL: vpaddlQu8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u8 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -189,7 +189,7 @@ define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind { define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind { ; CHECK-LABEL: vpaddlQu16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u16 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -202,7 +202,7 @@ define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind { define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind { ; CHECK-LABEL: vpaddlQu32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u32 q8, q8 ; CHECK-NEXT: vmov r0, r1, d16 @@ -216,7 +216,7 @@ define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind { ; Combine vuzp+vadd->vpadd. define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i8 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -233,7 +233,7 @@ define void @addCombineToVPADD_i8(<16 x i8> *%cbcr, <8 x i8> *%X) nounwind ssp { ; Combine vuzp+vadd->vpadd. 
define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i16 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -249,7 +249,7 @@ define void @addCombineToVPADD_i16(<8 x i16> *%cbcr, <4 x i16> *%X) nounwind ssp ; Combine vtrn+vadd->vpadd. define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADD_i32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpadd.i32 d16, d16, d17 ; CHECK-NEXT: vstr d16, [r1] @@ -265,7 +265,7 @@ define void @addCombineToVPADD_i32(<4 x i32> *%cbcr, <2 x i32> *%X) nounwind ssp ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s8 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -284,7 +284,7 @@ define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss ; FIXME: Legalization butchers the shuffles. define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_s8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov.i16 d16, #0x8 ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vext.8 d17, d18, d16, #1 @@ -309,7 +309,7 @@ define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u8 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -328,7 +328,7 @@ define void @addCombineToVPADDLq_u8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss ; shuffle is awkward, so this doesn't match at the moment. define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u8_early_zext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmovl.u8 q9, d17 ; CHECK-NEXT: vmovl.u8 q8, d16 @@ -349,7 +349,7 @@ define void @addCombineToVPADDLq_u8_early_zext(<16 x i8> *%cbcr, <8 x i16> *%X) ; FIXME: Legalization butchers the shuffle. define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_u8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.8 d18, d16, d16, #1 ; CHECK-NEXT: vbic.i16 d16, #0xff00 @@ -370,7 +370,7 @@ define void @addCombineToVPADDL_u8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp ; Matching to vpaddl.8 requires matching shuffle(zext()). 
define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDL_u8_early_zext: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vmovl.u8 q8, d16 ; CHECK-NEXT: vpadd.i16 d16, d16, d17 @@ -388,7 +388,7 @@ define void @addCombineToVPADDL_u8_early_zext(<16 x i8> *%cbcr, <4 x i16> *%X) n ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s16 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -406,7 +406,7 @@ define void @addCombineToVPADDLq_s16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind s ; Combine vuzp+vaddl->vpaddl define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u16 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -424,7 +424,7 @@ define void @addCombineToVPADDLq_u16(<8 x i16> *%cbcr, <4 x i32> *%X) nounwind s ; Combine vtrn+vaddl->vpaddl define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_s32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.s32 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -442,7 +442,7 @@ define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind s ; Combine vtrn+vaddl->vpaddl define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp { ; CHECK-LABEL: addCombineToVPADDLq_u32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vpaddl.u32 q8, q8 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1] @@ -460,7 +460,7 @@ define void @addCombineToVPADDLq_u32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind s ; Legalization promotes the <4 x i8> to <4 x i16>. define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) { ; CHECK-LABEL: fromExtendingExtractVectorElt_i8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vpaddl.s8 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 @@ -474,7 +474,7 @@ define <4 x i8> @fromExtendingExtractVectorElt_i8(<8 x i8> %in) { ; Legalization promotes the <2 x i16> to <2 x i32>. 
define <2 x i16> @fromExtendingExtractVectorElt_i16(<4 x i16> %in) { ; CHECK-LABEL: fromExtendingExtractVectorElt_i16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vpaddl.s16 d16, d16 ; CHECK-NEXT: vmov r0, r1, d16 diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll index df6336043fd..12cb504eda7 100644 --- a/test/CodeGen/ARM/vtrn.ll +++ b/test/CodeGen/ARM/vtrn.ll @@ -2,7 +2,7 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.8 d17, d16 @@ -19,7 +19,7 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]] @@ -34,7 +34,7 @@ define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtrni16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.16 d17, d16 @@ -51,7 +51,7 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vtrni16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.16 [[LDR0]], [[LDR1]] @@ -66,7 +66,7 @@ define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtrni32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.32 d17, d16 @@ -83,7 +83,7 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: vtrni32_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]] @@ -98,7 +98,7 @@ define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vtrnf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.32 d17, d16 @@ -115,7 +115,7 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind { define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind { ; CHECK-LABEL: vtrnf_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.32 [[LDR0]], [[LDR1]] @@ -130,7 +130,7 @@ define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind { define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtrnQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.8 q9, q8 @@ -148,7 +148,7 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vtrnQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; 
CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.8 q9, q8 @@ -163,7 +163,7 @@ define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -181,7 +181,7 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -196,7 +196,7 @@ define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtrnQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -214,7 +214,7 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vtrnQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -229,7 +229,7 @@ define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vtrnQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -247,7 +247,7 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vtrnQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.32 q9, q8 @@ -263,7 +263,7 @@ define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vtrn.8 d17, d16 @@ -280,7 +280,7 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vtrni8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1] ; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0] ; CHECK-NEXT: vtrn.8 [[LDR0]], [[LDR1]] @@ -295,7 +295,7 @@ define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -313,7 +313,7 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vtrnQi16_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vtrn.16 q9, q8 @@ -375,7 +375,7 @@ define <8 x i8> @vtrn_mismatched_builvector1(<8 x i8> %tr0, <8 x i8> %tr1, 
define void @lower_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) { entry: ; CHECK-LABEL: lower_twice_no_vtrn: - ; CHECK: @ BB#0: + ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d18, [r0] ; CHECK-NEXT: vtrn.16 d18, d16 @@ -394,7 +394,7 @@ entry: define void @upper_twice_no_vtrn(<4 x i16>* %A, <4 x i16>* %B, <8 x i16>* %C) { entry: ; CHECK-LABEL: upper_twice_no_vtrn: - ; CHECK: @ BB#0: + ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d18, [r0] ; CHECK-NEXT: vtrn.16 d18, d16 diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll index 24090cfd6c6..0ac366be3fe 100644 --- a/test/CodeGen/ARM/vuzp.ll +++ b/test/CodeGen/ARM/vuzp.ll @@ -3,7 +3,7 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vuzp.8 d17, d16 @@ -20,7 +20,7 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.8 d16, d17 @@ -35,7 +35,7 @@ define <16 x i8> @vuzpi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vuzp.16 d17, d16 @@ -52,7 +52,7 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpi16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.16 d16, d17 @@ -69,7 +69,7 @@ define <8 x i16> @vuzpi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.8 q9, q8 @@ -87,7 +87,7 @@ define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <32 x i8> @vuzpQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.8 q9, q8 @@ -102,7 +102,7 @@ define <32 x i8> @vuzpQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -120,7 +120,7 @@ define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <16 x i16> @vuzpQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -135,7 +135,7 @@ define <16 x i16> @vuzpQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vuzpQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -153,7 +153,7 @@ define <4 x i32> @vuzpQi32(<4 x i32>* 
%A, <4 x i32>* %B) nounwind { define <8 x i32> @vuzpQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vuzpQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -168,7 +168,7 @@ define <8 x i32> @vuzpQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vuzpQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -186,7 +186,7 @@ define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x float> @vuzpQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vuzpQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.32 q9, q8 @@ -203,7 +203,7 @@ define <8 x float> @vuzpQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vuzp.8 d17, d16 @@ -220,7 +220,7 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vuzpi8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vuzp.8 d16, d17 @@ -235,7 +235,7 @@ define <16 x i8> @vuzpi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -253,7 +253,7 @@ define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <16 x i16> @vuzpQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vuzpQi16_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vuzp.16 q9, q8 @@ -268,7 +268,7 @@ define <16 x i16> @vuzpQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <8 x i16> @vuzp_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vuzp_lower_shufflemask_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 @@ -285,7 +285,7 @@ entry: define <4 x i32> @vuzp_lower_shufflemask_zeroed(<2 x i32>* %A, <2 x i32>* %B) { ; CHECK-LABEL: vuzp_lower_shufflemask_zeroed: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] @@ -303,7 +303,7 @@ entry: define void @vuzp_rev_shufflemask_vtrn(<2 x i32>* %A, <2 x i32>* %B, <4 x i32>* %C) { ; CHECK-LABEL: vuzp_rev_shufflemask_vtrn: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vrev64.32 q9, q8 @@ -323,7 +323,7 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8 ; This results in a build_vector with mismatched types. We will generate two vmovn.i32 instructions to ; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8. 
; CHECK-LABEL: cmpsel_trunc: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: add r12, sp, #16 ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: mov r12, sp @@ -352,7 +352,7 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8 ; to perform the vuzp and get the vbsl mask. define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r11, lr} ; CHECK-NEXT: push {r11, lr} ; CHECK-NEXT: add r12, sp, #8 @@ -388,7 +388,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1, ; This produces a build_vector with some of the operands undefs. define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: add r12, sp, #16 @@ -416,7 +416,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1 define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: mov r12, sp ; CHECK-NEXT: vld1.64 {d16, d17}, [r12] ; CHECK-NEXT: add r12, sp, #16 @@ -435,7 +435,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1, ; CHECK-NEXT: vmov r0, r1, d16 ; CHECK-NEXT: mov pc, lr ; CHECK-NEXT: .p2align 3 -; CHECK-NEXT: @ BB#1: +; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI22_0: ; CHECK-NEXT: .byte 255 @ 0xff ; CHECK-NEXT: .byte 255 @ 0xff @@ -458,7 +458,7 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1, ; get some vector size that we can represent. 
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1, ; CHECK-LABEL: vuzp_wide_type: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r10, r11, lr} ; CHECK-NEXT: push {r4, r10, r11, lr} ; CHECK-NEXT: .setfp r11, sp, #8 @@ -517,7 +517,7 @@ define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1, ; CHECK-NEXT: pop {r4, r10, r11, lr} ; CHECK-NEXT: mov pc, lr ; CHECK-NEXT: .p2align 3 -; CHECK-NEXT: @ BB#1: +; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI23_0: ; CHECK-NEXT: .byte 0 @ 0x0 ; CHECK-NEXT: .byte 1 @ 0x1 @@ -539,7 +539,7 @@ define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1, %struct.uint8x8x2_t = type { [2 x <8 x i8>] } define %struct.uint8x8x2_t @vuzp_extract_subvector(<16 x i8> %t) #0 { ; CHECK-LABEL: vuzp_extract_subvector: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vmov d17, r2, r3 ; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vorr d18, d17, d17 diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll index 06b49ab9405..5047b3e087a 100644 --- a/test/CodeGen/ARM/vzip.ll +++ b/test/CodeGen/ARM/vzip.ll @@ -3,7 +3,7 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.8 d17, d16 @@ -20,7 +20,7 @@ define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.8 d16, d17 @@ -35,7 +35,7 @@ define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vzipi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.16 d17, d16 @@ -52,7 +52,7 @@ define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: vzipi16_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.16 d16, d17 @@ -69,7 +69,7 @@ define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.8 q9, q8 @@ -87,7 +87,7 @@ define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <32 x i8> @vzipQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.8 q9, q8 @@ -102,7 +102,7 @@ define <32 x i8> @vzipQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vzipQi16: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.16 q9, q8 @@ -120,7 +120,7 @@ define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <16 x i16> @vzipQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { ; CHECK-LABEL: vzipQi16_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.16 q9, q8 @@ 
-135,7 +135,7 @@ define <16 x i16> @vzipQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vzipQi32: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.32 q9, q8 @@ -153,7 +153,7 @@ define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i32> @vzipQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { ; CHECK-LABEL: vzipQi32_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.32 q9, q8 @@ -168,7 +168,7 @@ define <8 x i32> @vzipQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vzipQf: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.32 q9, q8 @@ -186,7 +186,7 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x float> @vzipQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { ; CHECK-LABEL: vzipQf_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.32 q9, q8 @@ -203,7 +203,7 @@ define <8 x float> @vzipQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind { define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d17, [r0] ; CHECK-NEXT: vzip.8 d17, d16 @@ -220,7 +220,7 @@ define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: vzipi8_undef_Qres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.8 d16, d17 @@ -235,7 +235,7 @@ define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_undef: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r1] ; CHECK-NEXT: vld1.64 {d18, d19}, [r0] ; CHECK-NEXT: vzip.8 q9, q8 @@ -253,7 +253,7 @@ define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { ; CHECK-LABEL: vzipQi8_undef_QQres: -; CHECK: @ BB#0: +; CHECK: @ %bb.0: ; CHECK-NEXT: vld1.64 {d16, d17}, [r2] ; CHECK-NEXT: vld1.64 {d18, d19}, [r1] ; CHECK-NEXT: vzip.8 q9, q8 @@ -268,7 +268,7 @@ define <32 x i8> @vzipQi8_undef_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @vzip_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_lower_shufflemask_undef: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d17, [r1] ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vzip.16 d16, d17 @@ -287,7 +287,7 @@ entry: ; as a vtrn. 
define <8 x i16> @vzip_lower_shufflemask_undef_rev(<4 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_lower_shufflemask_undef_rev: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r1] ; CHECK-NEXT: vldr d19, [r0] ; CHECK-NEXT: vtrn.16 d19, d16 @@ -303,7 +303,7 @@ entry: define <4 x i32> @vzip_lower_shufflemask_zeroed(<2 x i32>* %A) { ; CHECK-LABEL: vzip_lower_shufflemask_zeroed: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vzip.32 q8, q9 @@ -318,7 +318,7 @@ entry: define <4 x i32> @vzip_lower_shufflemask_vuzp(<2 x i32>* %A) { ; CHECK-LABEL: vzip_lower_shufflemask_vuzp: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vdup.32 q9, d16[0] ; CHECK-NEXT: vzip.32 q8, q9 @@ -333,7 +333,7 @@ entry: define void @vzip_undef_rev_shufflemask_vtrn(<2 x i32>* %A, <4 x i32>* %B) { ; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldr d16, [r0] ; CHECK-NEXT: vorr q9, q8, q8 ; CHECK-NEXT: vzip.32 q8, q9 @@ -349,7 +349,7 @@ entry: define void @vzip_vext_factor(<8 x i16>* %A, <4 x i16>* %B) { ; CHECK-LABEL: vzip_vext_factor: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vld1.64 {d16, d17}, [r0] ; CHECK-NEXT: vext.16 d18, d16, d17, #1 ; CHECK-NEXT: vext.16 d16, d18, d17, #2 @@ -365,7 +365,7 @@ entry: define <8 x i8> @vdup_zip(i8* nocapture readonly %x, i8* nocapture readonly %y) { ; CHECK-LABEL: vdup_zip: -; CHECK: @ BB#0: @ %entry +; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vld1.8 {d16[]}, [r1] ; CHECK-NEXT: vld1.8 {d17[]}, [r0] ; CHECK-NEXT: vzip.8 d17, d16 diff --git a/test/CodeGen/AVR/atomics/fence.ll b/test/CodeGen/AVR/atomics/fence.ll index 6ea49bc7e3f..b4cd215f3a2 100644 --- a/test/CodeGen/AVR/atomics/fence.ll +++ b/test/CodeGen/AVR/atomics/fence.ll @@ -4,7 +4,7 @@ ; AVR is always singlethreaded so fences do nothing. ; CHECK_LABEL: atomic_fence8 -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: ret define void @atomic_fence8() { fence acquire diff --git a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll index 64faff70a33..5c247f6e8e6 100644 --- a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll +++ b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll @@ -9,18 +9,18 @@ ; ; This issue manifests in a CFG that looks something like this: ; -; BB#2: derived from LLVM BB %finish -; Predecessors according to CFG: BB#0 BB#1 -; %0 = PHI %3, <BB#0>, %5, <BB#1> +; %bb.2: derived from LLVM BB %finish +; Predecessors according to CFG: %bb.0 %bb.1 +; %0 = PHI %3, <%bb.0>, %5, <%bb.1> ; %7 = LDIRdK 2 ; %8 = LDIRdK 1 ; CPRdRr %2, %0, %SREG -; BREQk <BB#6>, %SREG -; Successors according to CFG: BB#5(?%) BB#6(?%) +; BREQk <%bb.6>, %SREG +; Successors according to CFG: %bb.5(?%) %bb.6(?%) ; -; The code assumes it the fallthrough block after this is BB#5, but -; it's actually BB#3! To be proper, there should be an unconditional -; jump tying this block to BB#5. +; The code assumes the fallthrough block after this is %bb.5, but +; it's actually %bb.3! To be proper, there should be an unconditional +; jump tying this block to %bb.5. define i8 @select_must_add_unconditional_jump(i8 %arg0, i8 %arg1) unnamed_addr { entry-block: @@ -49,10 +49,10 @@ dead: ; basic block containing `select` needs to contain explicit jumps to ; both successors.
-; CHECK: BB#2: derived from LLVM BB %finish -; CHECK: BREQk <[[BRANCHED:BB#[0-9]+]]> -; CHECK: RJMPk <[[DIRECT:BB#[0-9]+]]> +; CHECK: %bb.2: derived from LLVM BB %finish +; CHECK: BREQk <[[BRANCHED:%bb.[0-9]+]]> +; CHECK: RJMPk <[[DIRECT:%bb.[0-9]+]]> ; CHECK: Successors according to CFG ; CHECK-SAME-DAG: {{.*}}[[BRANCHED]] ; CHECK-SAME-DAG: {{.*}}[[DIRECT]] -; CHECK: BB#3: derived from LLVM BB +; CHECK: %bb.3: derived from LLVM BB diff --git a/test/CodeGen/Generic/MachineBranchProb.ll b/test/CodeGen/Generic/MachineBranchProb.ll index 75e9a191e3d..dc4a52ab711 100644 --- a/test/CodeGen/Generic/MachineBranchProb.ll +++ b/test/CodeGen/Generic/MachineBranchProb.ll @@ -21,14 +21,14 @@ entry: i64 5, label %sw.bb1 i64 15, label %sw.bb ], !prof !0 -; CHECK: BB#0: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}92.17%) BB#4({{[0-9a-fx/= ]+}}7.83%) -; CHECK: BB#4: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}75.29%) BB#5({{[0-9a-fx/= ]+}}24.71%) -; CHECK: BB#5: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}47.62%) BB#6({{[0-9a-fx/= ]+}}52.38%) -; CHECK: BB#6: derived from LLVM BB %entry -; CHECK: Successors according to CFG: BB#1({{[0-9a-fx/= ]+}}36.36%) BB#3({{[0-9a-fx/= ]+}}63.64%) +; CHECK: %bb.0: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}92.17%) %bb.4({{[0-9a-fx/= ]+}}7.83%) +; CHECK: %bb.4: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}75.29%) %bb.5({{[0-9a-fx/= ]+}}24.71%) +; CHECK: %bb.5: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}47.62%) %bb.6({{[0-9a-fx/= ]+}}52.38%) +; CHECK: %bb.6: derived from LLVM BB %entry +; CHECK: Successors according to CFG: %bb.1({{[0-9a-fx/= ]+}}36.36%) %bb.3({{[0-9a-fx/= ]+}}63.64%) sw.bb: ; this call will prevent simplifyCFG from optimizing the block away in ARM/AArch64. @@ -70,9 +70,9 @@ return: ret void ; right with weight 20. ; ; CHECK-LABEL: Machine code for function left_leaning_weight_balanced_tree: -; CHECK: BB#0: derived from LLVM BB %entry +; CHECK: %bb.0: derived from LLVM BB %entry ; CHECK-NOT: Successors -; CHECK: Successors according to CFG: BB#8({{[0-9a-fx/= ]+}}39.71%) BB#9({{[0-9a-fx/= ]+}}60.29%) +; CHECK: Successors according to CFG: %bb.8({{[0-9a-fx/= ]+}}39.71%) %bb.9({{[0-9a-fx/= ]+}}60.29%) } !1 = !{!"branch_weights", diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir index 47da85b2308..c685a0c2740 100644 --- a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir +++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir @@ -11,7 +11,7 @@ # then created code, where the first predicated instruction has incorrect # implicit use of r0: # -# BB#0: +# %bb.0: # Live Ins: %R0 # %R1 = A2_sxth %R0 ; hoisted, kills r0 # A2_nop %P0 diff --git a/test/CodeGen/Hexagon/hwloop-redef-imm.mir b/test/CodeGen/Hexagon/hwloop-redef-imm.mir index 014908e20a7..7b6044c9a50 100644 --- a/test/CodeGen/Hexagon/hwloop-redef-imm.mir +++ b/test/CodeGen/Hexagon/hwloop-redef-imm.mir @@ -8,10 +8,10 @@ # loop setup in the preheader). 
# CHECK: [[R0:%[0-9]+]]:intregs = A2_tfrsi 1920 -# CHECK: J2_loop0r %bb.1.b1, [[R0]] +# CHECK: J2_loop0r %bb.1, [[R0]] # # CHECK: bb.1.b1 (address-taken): -# CHECK: ENDLOOP0 %bb.1.b1 +# CHECK: ENDLOOP0 %bb.1 --- | diff --git a/test/CodeGen/Hexagon/ifcvt-edge-weight.ll b/test/CodeGen/Hexagon/ifcvt-edge-weight.ll index 341567e1d02..250a81938bd 100644 --- a/test/CodeGen/Hexagon/ifcvt-edge-weight.ll +++ b/test/CodeGen/Hexagon/ifcvt-edge-weight.ll @@ -1,8 +1,8 @@ ; RUN: llc -march=hexagon -mcpu=hexagonv5 -hexagon-eif=0 -print-machineinstrs=if-converter %s -o /dev/null 2>&1 | FileCheck %s ; Check that the edge weights are updated correctly after if-conversion. -; CHECK: BB#3: -; CHECK: Successors according to CFG: BB#2({{[0-9a-fx/= ]+}}10.00%) BB#1({{[0-9a-fx/= ]+}}90.00%) +; CHECK: %bb.3: +; CHECK: Successors according to CFG: %bb.2({{[0-9a-fx/= ]+}}10.00%) %bb.1({{[0-9a-fx/= ]+}}90.00%) @a = external global i32 @d = external global i32 diff --git a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir index d9b117bd9c2..f5d63287aff 100644 --- a/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir +++ b/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir @@ -30,33 +30,33 @@ liveins: - { reg: '%edi' } - { reg: '%esi' } # CHECK: frameInfo: -# CHECK: savePoint: '%bb.2.true' -# CHECK-NEXT: restorePoint: '%bb.2.true' +# CHECK: savePoint: '%bb.2' +# CHECK-NEXT: restorePoint: '%bb.2' # CHECK: stack frameInfo: maxAlignment: 4 hasCalls: true - savePoint: '%bb.2.true' - restorePoint: '%bb.2.true' + savePoint: '%bb.2' + restorePoint: '%bb.2' stack: - { id: 0, name: tmp, offset: 0, size: 4, alignment: 4 } body: | bb.0: - successors: %bb.2.true, %bb.1 + successors: %bb.2, %bb.1 liveins: %edi, %esi %eax = COPY %edi CMP32rr %eax, killed %esi, implicit-def %eflags - JL_1 %bb.2.true, implicit killed %eflags + JL_1 %bb.2, implicit killed %eflags bb.1: - successors: %bb.3.false + successors: %bb.3 liveins: %eax - JMP_1 %bb.3.false + JMP_1 %bb.3 bb.2.true: - successors: %bb.3.false + successors: %bb.3 liveins: %eax MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax diff --git a/test/CodeGen/MIR/X86/implicit-register-flag.mir b/test/CodeGen/MIR/X86/implicit-register-flag.mir index 70b1cc50094..dddbfc90cf6 100644 --- a/test/CodeGen/MIR/X86/implicit-register-flag.mir +++ b/test/CodeGen/MIR/X86/implicit-register-flag.mir @@ -31,11 +31,11 @@ name: foo body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 ; CHECK: CMP32ri8 %edi, 10, implicit-def %eflags - ; CHECK-NEXT: JG_1 %bb.2.exit, implicit %eflags + ; CHECK-NEXT: JG_1 %bb.2, implicit %eflags CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit %eflags + JG_1 %bb.2, implicit %eflags bb.1.less: ; CHECK: %eax = MOV32r0 implicit-def %eflags diff --git a/test/CodeGen/MIR/X86/jump-table-info.mir b/test/CodeGen/MIR/X86/jump-table-info.mir index 52d562c8212..71dd46b8218 100644 --- a/test/CodeGen/MIR/X86/jump-table-info.mir +++ b/test/CodeGen/MIR/X86/jump-table-info.mir @@ -61,23 +61,23 @@ name: test_jumptable # CHECK-NEXT: kind: label-difference32 # CHECK-NEXT: entries: # CHECK-NEXT: - id: 0 -# CHECK-NEXT: blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] +# CHECK-NEXT: blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] # CHECK-NEXT: body: jumpTable: kind: label-difference32 entries: - id: 0 - blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] + blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] body: | bb.0.entry: - successors: %bb.2.def, 
%bb.1.entry + successors: %bb.2, %bb.1 %eax = MOV32rr %edi, implicit-def %rax CMP32ri8 %edi, 3, implicit-def %eflags - JA_1 %bb.2.def, implicit %eflags + JA_1 %bb.2, implicit %eflags bb.1.entry: - successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4 + successors: %bb.3, %bb.4, %bb.5, %bb.6 ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg %rcx = LEA64r %rip, 1, _, %jump-table.0, _ %rax = MOVSX64rm32 %rcx, 4, %rax, 0, _ @@ -110,17 +110,17 @@ jumpTable: kind: label-difference32 entries: - id: 1 - blocks: [ '%bb.3.lbl1', '%bb.4.lbl2', '%bb.5.lbl3', '%bb.6.lbl4' ] + blocks: [ '%bb.3', '%bb.4', '%bb.5', '%bb.6' ] body: | bb.0.entry: - successors: %bb.2.def, %bb.1.entry + successors: %bb.2, %bb.1 %eax = MOV32rr %edi, implicit-def %rax CMP32ri8 %edi, 3, implicit-def %eflags - JA_1 %bb.2.def, implicit %eflags + JA_1 %bb.2, implicit %eflags bb.1.entry: - successors: %bb.3.lbl1, %bb.4.lbl2, %bb.5.lbl3, %bb.6.lbl4 + successors: %bb.3, %bb.4, %bb.5, %bb.6 ; Verify that the printer will use an id of 0 for this jump table: ; CHECK: %rcx = LEA64r %rip, 1, %noreg, %jump-table.0, %noreg %rcx = LEA64r %rip, 1, _, %jump-table.1, _ diff --git a/test/CodeGen/MIR/X86/machine-basic-block-operands.mir b/test/CodeGen/MIR/X86/machine-basic-block-operands.mir index f5915738679..a7866f239be 100644 --- a/test/CodeGen/MIR/X86/machine-basic-block-operands.mir +++ b/test/CodeGen/MIR/X86/machine-basic-block-operands.mir @@ -36,13 +36,13 @@ name: foo body: | ; CHECK: bb.0.entry bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 %eax = MOV32rm %rdi, 1, _, 0, _ ; CHECK: CMP32ri8 %eax, 10 - ; CHECK-NEXT: JG_1 %bb.2.exit + ; CHECK-NEXT: JG_1 %bb.2 CMP32ri8 %eax, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit %eflags + JG_1 %bb.2, implicit %eflags ; CHECK: bb.1.less: bb.1.less: diff --git a/test/CodeGen/MIR/X86/newline-handling.mir b/test/CodeGen/MIR/X86/newline-handling.mir index ce53e49eddb..1a93c1a6425 100644 --- a/test/CodeGen/MIR/X86/newline-handling.mir +++ b/test/CodeGen/MIR/X86/newline-handling.mir @@ -35,10 +35,10 @@ liveins: # CHECK-LABEL: name: foo # CHECK: body: | # CHECK-NEXT: bb.0.entry: -# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) +# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) # CHECK-NEXT: liveins: %edi # CHECK: CMP32ri8 %edi, 10, implicit-def %eflags -# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags +# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags # CHECK: bb.1.less: # CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags @@ -50,13 +50,13 @@ liveins: # CHECK-NEXT: RETQ killed %eax body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, implicit killed %eflags bb.1.less: @@ -79,10 +79,10 @@ liveins: # CHECK-LABEL: name: bar # CHECK: body: | # CHECK-NEXT: bb.0.entry: -# CHECK-NEXT: successors: %bb.1.less(0x40000000), %bb.2.exit(0x40000000) +# CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) # CHECK-NEXT: liveins: %edi # CHECK: CMP32ri8 %edi, 10, implicit-def %eflags -# CHECK-NEXT: JG_1 %bb.2.exit, implicit killed %eflags +# CHECK-NEXT: JG_1 %bb.2, implicit killed %eflags # CHECK: bb.1.less: # CHECK-NEXT: %eax = MOV32r0 implicit-def dead %eflags @@ -95,10 +95,10 @@ liveins: body: | bb.0.entry: - successors: %bb.1.less, %bb.2.exit + successors: %bb.1, %bb.2 liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, 
implicit killed %eflags bb.1.less: %eax = MOV32r0 implicit-def dead %eflags RETQ killed %eax diff --git a/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir b/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir index 512ba4e41aa..5a22557f324 100644 --- a/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir +++ b/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir @@ -21,14 +21,14 @@ name: foo body: | ; CHECK-LABEL: bb.0.entry: - ; CHECK: successors: %bb.1.less(0x2a3d70a4), %bb.2.exit(0x55c28f5c) + ; CHECK: successors: %bb.1(0x2a3d70a4), %bb.2(0x55c28f5c) ; CHECK-LABEL: bb.1.less: bb.0.entry: - successors: %bb.1.less (33), %bb.2.exit(67) + successors: %bb.1 (33), %bb.2(67) liveins: %edi CMP32ri8 %edi, 10, implicit-def %eflags - JG_1 %bb.2.exit, implicit killed %eflags + JG_1 %bb.2, implicit killed %eflags bb.1.less: %eax = MOV32r0 implicit-def dead %eflags diff --git a/test/CodeGen/MSP430/BranchSelector.ll b/test/CodeGen/MSP430/BranchSelector.ll index 4dfd95bf41a..a36da626234 100644 --- a/test/CodeGen/MSP430/BranchSelector.ll +++ b/test/CodeGen/MSP430/BranchSelector.ll @@ -579,7 +579,7 @@ begin: ; This branch should not be expanded ; CHECK-LABEL: .LBB1_1: ; CHECK: jeq .LBB1_1 -; CHECK: BB#2: +; CHECK: %bb.2: ; CHECK: ret br i1 %lnot, label %begin, label %end diff --git a/test/CodeGen/Mips/compactbranches/empty-block.mir b/test/CodeGen/Mips/compactbranches/empty-block.mir index 7fb1afae912..5bfaef0cb69 100644 --- a/test/CodeGen/Mips/compactbranches/empty-block.mir +++ b/test/CodeGen/Mips/compactbranches/empty-block.mir @@ -5,11 +5,11 @@ # CHECK: blezc # CHECK: nop -# CHECK: # BB#1: +# CHECK: # %bb.1: # CHECK: .insn -# CHECK: # BB#2: +# CHECK: # %bb.2: # CHECK: .insn -# CHECK: # BB#3: +# CHECK: # %bb.3: # CHECK: jal --- | diff --git a/test/CodeGen/Mips/lcb4a.ll b/test/CodeGen/Mips/lcb4a.ll index 4a99ef26efc..016e895d12e 100644 --- a/test/CodeGen/Mips/lcb4a.ll +++ b/test/CodeGen/Mips/lcb4a.ll @@ -26,7 +26,7 @@ if.end: ; preds = %if.else, %if.then } ; ci: beqz $3, $BB0_2 -; ci: # BB#1: # %if.else +; ci: # %bb.1: # %if.else ; Function Attrs: nounwind optsize diff --git a/test/CodeGen/Mips/prevent-hoisting.ll b/test/CodeGen/Mips/prevent-hoisting.ll index ca71bf7d1af..1fc7462811c 100644 --- a/test/CodeGen/Mips/prevent-hoisting.ll +++ b/test/CodeGen/Mips/prevent-hoisting.ll @@ -16,7 +16,7 @@ ; CHECK: sll ; Check that at the start of a fallthrough block there is a instruction that writes to $1. 
-; CHECK: {{BB[0-9_#]+}}: +; CHECK: {{%bb.[0-9]+}}: ; CHECK: sll $1, $[[R0:[0-9]+]], 4 ; CHECK: lw $[[R1:[0-9]+]], %got(assignSE2partition)($[[R2:[0-9]+]]) diff --git a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll index 56f4a4173ef..5b8b8147cce 100644 --- a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll +++ b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll @@ -6,7 +6,7 @@ define i32 @test(i32 %i) { ; CHECK-LABEL: test: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 4, 2, .LC0@toc@ha ; CHECK-NEXT: extsw 3, 3 ; CHECK-NEXT: addis 5, 2, .LC1@toc@ha diff --git a/test/CodeGen/PowerPC/addegluecrash.ll b/test/CodeGen/PowerPC/addegluecrash.ll index f17b6dce9a9..642960f8490 100644 --- a/test/CodeGen/PowerPC/addegluecrash.ll +++ b/test/CodeGen/PowerPC/addegluecrash.ll @@ -5,7 +5,7 @@ target triple = "powerpc64le-unknown-linux-gnu" define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) { ; CHECK-LABEL: bn_mul_comba8: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ld 6, 0(4) ; CHECK-NEXT: ld 7, 0(5) ; CHECK-NEXT: mulhdu 8, 7, 6 diff --git a/test/CodeGen/PowerPC/andc.ll b/test/CodeGen/PowerPC/andc.ll index df47bfc1e38..9bfbda2bbd7 100644 --- a/test/CodeGen/PowerPC/andc.ll +++ b/test/CodeGen/PowerPC/andc.ll @@ -3,7 +3,7 @@ define i1 @and_cmp1(i32 %x, i32 %y) { ; CHECK-LABEL: and_cmp1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: andc 3, 4, 3 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31 @@ -15,7 +15,7 @@ define i1 @and_cmp1(i32 %x, i32 %y) { define i1 @and_cmp_const(i32 %x) { ; CHECK-LABEL: and_cmp_const: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 4, 43 ; CHECK-NEXT: andc 3, 4, 3 ; CHECK-NEXT: cntlzw 3, 3 @@ -28,7 +28,7 @@ define i1 @and_cmp_const(i32 %x) { define i1 @foo(i32 %i) { ; CHECK-LABEL: foo: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, 4660 ; CHECK-NEXT: ori 4, 4, 22136 ; CHECK-NEXT: andc 3, 4, 3 @@ -42,7 +42,7 @@ define i1 @foo(i32 %i) { define <4 x i32> @hidden_not_v4i32(<4 x i32> %x) { ; CHECK-LABEL: hidden_not_v4i32: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 6 ; CHECK-NEXT: xxlandc 34, 35, 34 ; CHECK-NEXT: blr diff --git a/test/CodeGen/PowerPC/atomics-constant.ll b/test/CodeGen/PowerPC/atomics-constant.ll index 77825c608a3..559cd9eb656 100644 --- a/test/CodeGen/PowerPC/atomics-constant.ll +++ b/test/CodeGen/PowerPC/atomics-constant.ll @@ -7,7 +7,7 @@ target triple = "powerpc64le-unknown-linux-gnu" define i64 @foo() { ; CHECK-LABEL: foo: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis 3, 2, .LC0@toc@ha ; CHECK-NEXT: li 4, 0 ; CHECK-NEXT: ld 3, .LC0@toc@l(3) diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll index c8fb1e74e73..7079f6dd52e 100644 --- a/test/CodeGen/PowerPC/atomics-regression.ll +++ b/test/CodeGen/PowerPC/atomics-regression.ll @@ -3,7 +3,7 @@ define i8 @test0(i8* %ptr) { ; PPC64LE-LABEL: test0: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i8, i8* %ptr unordered, align 1 @@ -12,7 +12,7 @@ define i8 @test0(i8* %ptr) { define i8 @test1(i8* %ptr) { ; PPC64LE-LABEL: test1: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i8, i8* %ptr monotonic, align 1 @@ -21,7 +21,7 @@ define i8 @test1(i8* %ptr) { define i8 @test2(i8* %ptr) { ; PPC64LE-LABEL: test2: -; PPC64LE: # BB#0: +; PPC64LE: # 
%bb.0: ; PPC64LE-NEXT: lbz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -33,7 +33,7 @@ define i8 @test2(i8* %ptr) { define i8 @test3(i8* %ptr) { ; PPC64LE-LABEL: test3: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lbz 3, 0(3) @@ -47,7 +47,7 @@ define i8 @test3(i8* %ptr) { define i16 @test4(i16* %ptr) { ; PPC64LE-LABEL: test4: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i16, i16* %ptr unordered, align 2 @@ -56,7 +56,7 @@ define i16 @test4(i16* %ptr) { define i16 @test5(i16* %ptr) { ; PPC64LE-LABEL: test5: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i16, i16* %ptr monotonic, align 2 @@ -65,7 +65,7 @@ define i16 @test5(i16* %ptr) { define i16 @test6(i16* %ptr) { ; PPC64LE-LABEL: test6: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lhz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -77,7 +77,7 @@ define i16 @test6(i16* %ptr) { define i16 @test7(i16* %ptr) { ; PPC64LE-LABEL: test7: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lhz 3, 0(3) @@ -91,7 +91,7 @@ define i16 @test7(i16* %ptr) { define i32 @test8(i32* %ptr) { ; PPC64LE-LABEL: test8: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i32, i32* %ptr unordered, align 4 @@ -100,7 +100,7 @@ define i32 @test8(i32* %ptr) { define i32 @test9(i32* %ptr) { ; PPC64LE-LABEL: test9: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i32, i32* %ptr monotonic, align 4 @@ -109,7 +109,7 @@ define i32 @test9(i32* %ptr) { define i32 @test10(i32* %ptr) { ; PPC64LE-LABEL: test10: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -121,7 +121,7 @@ define i32 @test10(i32* %ptr) { define i32 @test11(i32* %ptr) { ; PPC64LE-LABEL: test11: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: lwz 3, 0(3) @@ -135,7 +135,7 @@ define i32 @test11(i32* %ptr) { define i64 @test12(i64* %ptr) { ; PPC64LE-LABEL: test12: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i64, i64* %ptr unordered, align 8 @@ -144,7 +144,7 @@ define i64 @test12(i64* %ptr) { define i64 @test13(i64* %ptr) { ; PPC64LE-LABEL: test13: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: blr %val = load atomic i64, i64* %ptr monotonic, align 8 @@ -153,7 +153,7 @@ define i64 @test13(i64* %ptr) { define i64 @test14(i64* %ptr) { ; PPC64LE-LABEL: test14: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: ld 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 @@ -165,7 +165,7 @@ define i64 @test14(i64* %ptr) { define i64 @test15(i64* %ptr) { ; PPC64LE-LABEL: test15: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: ori 2, 2, 0 ; PPC64LE-NEXT: ld 3, 0(3) @@ -179,7 +179,7 @@ define i64 @test15(i64* %ptr) { define void @test16(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test16: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr store atomic i8 %val, i8* %ptr unordered, align 1 @@ -188,7 +188,7 @@ define void @test16(i8* %ptr, i8 %val) { define void @test17(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test17: -; 
PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr store atomic i8 %val, i8* %ptr monotonic, align 1 @@ -197,7 +197,7 @@ define void @test17(i8* %ptr, i8 %val) { define void @test18(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test18: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr @@ -207,7 +207,7 @@ define void @test18(i8* %ptr, i8 %val) { define void @test19(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test19: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: stb 4, 0(3) ; PPC64LE-NEXT: blr @@ -217,7 +217,7 @@ define void @test19(i8* %ptr, i8 %val) { define void @test20(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test20: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr store atomic i16 %val, i16* %ptr unordered, align 2 @@ -226,7 +226,7 @@ define void @test20(i16* %ptr, i16 %val) { define void @test21(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test21: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr store atomic i16 %val, i16* %ptr monotonic, align 2 @@ -235,7 +235,7 @@ define void @test21(i16* %ptr, i16 %val) { define void @test22(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test22: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr @@ -245,7 +245,7 @@ define void @test22(i16* %ptr, i16 %val) { define void @test23(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test23: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: sth 4, 0(3) ; PPC64LE-NEXT: blr @@ -255,7 +255,7 @@ define void @test23(i16* %ptr, i16 %val) { define void @test24(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test24: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr store atomic i32 %val, i32* %ptr unordered, align 4 @@ -264,7 +264,7 @@ define void @test24(i32* %ptr, i32 %val) { define void @test25(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test25: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr store atomic i32 %val, i32* %ptr monotonic, align 4 @@ -273,7 +273,7 @@ define void @test25(i32* %ptr, i32 %val) { define void @test26(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test26: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr @@ -283,7 +283,7 @@ define void @test26(i32* %ptr, i32 %val) { define void @test27(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test27: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: stw 4, 0(3) ; PPC64LE-NEXT: blr @@ -293,7 +293,7 @@ define void @test27(i32* %ptr, i32 %val) { define void @test28(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test28: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr store atomic i64 %val, i64* %ptr unordered, align 8 @@ -302,7 +302,7 @@ define void @test28(i64* %ptr, i64 %val) { define void @test29(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test29: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr store atomic i64 %val, i64* %ptr monotonic, align 8 @@ -311,7 +311,7 @@ define void @test29(i64* %ptr, i64 %val) { define void @test30(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test30: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr @@ -321,7 +321,7 @@ define void @test30(i64* %ptr, i64 %val) { define void @test31(i64* %ptr, i64 
%val) { ; PPC64LE-LABEL: test31: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: std 4, 0(3) ; PPC64LE-NEXT: blr @@ -331,7 +331,7 @@ define void @test31(i64* %ptr, i64 %val) { define void @test32() { ; PPC64LE-LABEL: test32: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence acquire @@ -340,7 +340,7 @@ define void @test32() { define void @test33() { ; PPC64LE-LABEL: test33: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence release @@ -349,7 +349,7 @@ define void @test33() { define void @test34() { ; PPC64LE-LABEL: test34: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence acq_rel @@ -358,7 +358,7 @@ define void @test34() { define void @test35() { ; PPC64LE-LABEL: test35: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: blr fence seq_cst @@ -367,7 +367,7 @@ define void @test35() { define void @test36() { ; PPC64LE-LABEL: test36: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") acquire @@ -376,7 +376,7 @@ define void @test36() { define void @test37() { ; PPC64LE-LABEL: test37: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") release @@ -385,7 +385,7 @@ define void @test37() { define void @test38() { ; PPC64LE-LABEL: test38: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr fence syncscope("singlethread") acq_rel @@ -394,7 +394,7 @@ define void @test38() { define void @test39() { ; PPC64LE-LABEL: test39: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: blr fence syncscope("singlethread") seq_cst @@ -403,7 +403,7 @@ define void @test39() { define void @test40(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test40: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB40_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB40_1: @@ -413,7 +413,7 @@ define void @test40(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB40_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic @@ -422,15 +422,15 @@ define void @test40(i8* %ptr, i8 %cmp, i8 %val) { define void @test41(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test41: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB41_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB41_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB41_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB41_4: @@ -443,15 +443,15 @@ define void @test41(i8* %ptr, i8 %cmp, i8 %val) { define void @test42(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test42: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB42_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB42_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB42_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB42_4: @@ -464,7 +464,7 @@ define void @test42(i8* %ptr, i8 %cmp, i8 %val) { define void @test43(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test43: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB43_2 ; PPC64LE-NEXT: .p2align 5 @@ -475,7 +475,7 @@ define void @test43(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB43_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic @@ -484,7 +484,7 @@ define void @test43(i8* %ptr, i8 %cmp, i8 %val) { define void @test44(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test44: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB44_2 ; PPC64LE-NEXT: .p2align 5 @@ -495,7 +495,7 @@ define void @test44(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB44_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire @@ -504,16 +504,16 @@ define void @test44(i8* %ptr, i8 %cmp, i8 %val) { define void @test45(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test45: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB45_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB45_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB45_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB45_4: @@ -526,16 +526,16 @@ define void @test45(i8* %ptr, i8 %cmp, i8 %val) { define void @test46(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test46: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB46_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB46_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB46_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB46_4: @@ -548,16 +548,16 @@ define void @test46(i8* %ptr, i8 %cmp, i8 %val) { define void @test47(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test47: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB47_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB47_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB47_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB47_4: @@ -570,16 +570,16 @@ define void @test47(i8* %ptr, i8 %cmp, i8 %val) { define void @test48(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test48: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB48_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB48_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB48_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB48_4: @@ -592,16 +592,16 @@ define void @test48(i8* %ptr, i8 %cmp, i8 %val) { define void @test49(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test49: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB49_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB49_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB49_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB49_4: @@ -614,7 +614,7 @@ define void @test49(i8* %ptr, i8 %cmp, i8 %val) { define void @test50(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test50: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB50_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB50_1: @@ -624,7 +624,7 @@ define void @test50(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB50_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic @@ -633,15 +633,15 @@ define void @test50(i16* %ptr, i16 %cmp, i16 %val) { define void @test51(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test51: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB51_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB51_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB51_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB51_4: @@ -654,15 +654,15 @@ define void @test51(i16* %ptr, i16 %cmp, i16 %val) { define void @test52(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test52: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB52_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB52_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB52_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB52_4: @@ -675,7 +675,7 @@ define void @test52(i16* %ptr, i16 %cmp, i16 %val) { define void @test53(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test53: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB53_2 ; PPC64LE-NEXT: .p2align 5 @@ -686,7 +686,7 @@ define void @test53(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB53_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic @@ -695,7 +695,7 @@ define void @test53(i16* %ptr, i16 %cmp, i16 %val) { define void @test54(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test54: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB54_2 ; PPC64LE-NEXT: .p2align 5 @@ -706,7 +706,7 @@ define void @test54(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB54_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire @@ -715,16 +715,16 @@ define void @test54(i16* %ptr, i16 %cmp, i16 %val) { define void @test55(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test55: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB55_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB55_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB55_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB55_4: @@ -737,16 +737,16 @@ define void @test55(i16* %ptr, i16 %cmp, i16 %val) { define void @test56(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test56: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB56_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB56_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB56_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB56_4: @@ -759,16 +759,16 @@ define void @test56(i16* %ptr, i16 %cmp, i16 %val) { define void @test57(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test57: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB57_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB57_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB57_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB57_4: @@ -781,16 +781,16 @@ define void @test57(i16* %ptr, i16 %cmp, i16 %val) { define void @test58(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test58: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB58_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB58_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB58_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB58_4: @@ -803,16 +803,16 @@ define void @test58(i16* %ptr, i16 %cmp, i16 %val) { define void @test59(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test59: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB59_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB59_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB59_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB59_4: @@ -825,7 +825,7 @@ define void @test59(i16* %ptr, i16 %cmp, i16 %val) { define void @test60(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test60: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB60_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB60_1: @@ -835,7 +835,7 @@ define void @test60(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB60_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic @@ -844,15 +844,15 @@ define void @test60(i32* %ptr, i32 %cmp, i32 %val) { define void @test61(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test61: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB61_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB61_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB61_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB61_4: @@ -865,15 +865,15 @@ define void @test61(i32* %ptr, i32 %cmp, i32 %val) { define void @test62(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test62: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB62_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB62_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB62_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB62_4: @@ -886,7 +886,7 @@ define void @test62(i32* %ptr, i32 %cmp, i32 %val) { define void @test63(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test63: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB63_2 ; PPC64LE-NEXT: .p2align 5 @@ -897,7 +897,7 @@ define void @test63(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB63_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic @@ -906,7 +906,7 @@ define void @test63(i32* %ptr, i32 %cmp, i32 %val) { define void @test64(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test64: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB64_2 ; PPC64LE-NEXT: .p2align 5 @@ -917,7 +917,7 @@ define void @test64(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB64_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire @@ -926,16 +926,16 @@ define void @test64(i32* %ptr, i32 %cmp, i32 %val) { define void @test65(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test65: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB65_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB65_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB65_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB65_4: @@ -948,16 +948,16 @@ define void @test65(i32* %ptr, i32 %cmp, i32 %val) { define void @test66(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test66: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB66_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB66_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB66_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB66_4: @@ -970,16 +970,16 @@ define void @test66(i32* %ptr, i32 %cmp, i32 %val) { define void @test67(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test67: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB67_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB67_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB67_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB67_4: @@ -992,16 +992,16 @@ define void @test67(i32* %ptr, i32 %cmp, i32 %val) { define void @test68(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test68: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB68_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB68_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB68_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB68_4: @@ -1014,16 +1014,16 @@ define void @test68(i32* %ptr, i32 %cmp, i32 %val) { define void @test69(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test69: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB69_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB69_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB69_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB69_4: @@ -1036,7 +1036,7 @@ define void @test69(i32* %ptr, i32 %cmp, i32 %val) { define void @test70(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test70: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB70_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB70_1: @@ -1046,7 +1046,7 @@ define void @test70(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB70_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic @@ -1055,15 +1055,15 @@ define void @test70(i64* %ptr, i64 %cmp, i64 %val) { define void @test71(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test71: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB71_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB71_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB71_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB71_4: @@ -1076,15 +1076,15 @@ define void @test71(i64* %ptr, i64 %cmp, i64 %val) { define void @test72(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test72: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB72_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB72_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB72_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB72_4: @@ -1097,7 +1097,7 @@ define void @test72(i64* %ptr, i64 %cmp, i64 %val) { define void @test73(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test73: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB73_2 ; PPC64LE-NEXT: .p2align 5 @@ -1108,7 +1108,7 @@ define void @test73(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB73_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic @@ -1117,7 +1117,7 @@ define void @test73(i64* %ptr, i64 %cmp, i64 %val) { define void @test74(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test74: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB74_2 ; PPC64LE-NEXT: .p2align 5 @@ -1128,7 +1128,7 @@ define void @test74(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB74_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire @@ -1137,16 +1137,16 @@ define void @test74(i64* %ptr, i64 %cmp, i64 %val) { define void @test75(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test75: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB75_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB75_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB75_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB75_4: @@ -1159,16 +1159,16 @@ define void @test75(i64* %ptr, i64 %cmp, i64 %val) { define void @test76(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test76: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB76_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB76_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB76_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB76_4: @@ -1181,16 +1181,16 @@ define void @test76(i64* %ptr, i64 %cmp, i64 %val) { define void @test77(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test77: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB77_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB77_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB77_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB77_4: @@ -1203,16 +1203,16 @@ define void @test77(i64* %ptr, i64 %cmp, i64 %val) { define void @test78(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test78: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB78_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB78_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB78_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB78_4: @@ -1225,16 +1225,16 @@ define void @test78(i64* %ptr, i64 %cmp, i64 %val) { define void @test79(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test79: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB79_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB79_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB79_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB79_4: @@ -1247,7 +1247,7 @@ define void @test79(i64* %ptr, i64 %cmp, i64 %val) { define void @test80(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test80: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB80_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB80_1: @@ -1257,7 +1257,7 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB80_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") monotonic monotonic @@ -1266,15 +1266,15 @@ define void @test80(i8* %ptr, i8 %cmp, i8 %val) { define void @test81(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test81: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB81_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB81_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB81_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB81_4: @@ -1287,15 +1287,15 @@ define void @test81(i8* %ptr, i8 %cmp, i8 %val) { define void @test82(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test82: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB82_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB82_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB82_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB82_4: @@ -1308,7 +1308,7 @@ define void @test82(i8* %ptr, i8 %cmp, i8 %val) { define void @test83(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test83: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB83_2 ; PPC64LE-NEXT: .p2align 5 @@ -1319,7 +1319,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB83_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release monotonic @@ -1328,7 +1328,7 @@ define void @test83(i8* %ptr, i8 %cmp, i8 %val) { define void @test84(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test84: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB84_2 ; PPC64LE-NEXT: .p2align 5 @@ -1339,7 +1339,7 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB84_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val syncscope("singlethread") release acquire @@ -1348,16 +1348,16 @@ define void @test84(i8* %ptr, i8 %cmp, i8 %val) { define void @test85(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test85: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB85_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB85_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB85_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB85_4: @@ -1370,16 +1370,16 @@ define void @test85(i8* %ptr, i8 %cmp, i8 %val) { define void @test86(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test86: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB86_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB86_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB86_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB86_4: @@ -1392,16 +1392,16 @@ define void @test86(i8* %ptr, i8 %cmp, i8 %val) { define void @test87(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test87: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB87_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB87_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB87_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB87_4: @@ -1414,16 +1414,16 @@ define void @test87(i8* %ptr, i8 %cmp, i8 %val) { define void @test88(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test88: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB88_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB88_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB88_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB88_4: @@ -1436,16 +1436,16 @@ define void @test88(i8* %ptr, i8 %cmp, i8 %val) { define void @test89(i8* %ptr, i8 %cmp, i8 %val) { ; PPC64LE-LABEL: test89: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB89_1: ; PPC64LE-NEXT: lbarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB89_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB89_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB89_4: @@ -1458,7 +1458,7 @@ define void @test89(i8* %ptr, i8 %cmp, i8 %val) { define void @test90(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test90: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB90_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB90_1: @@ -1468,7 +1468,7 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB90_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") monotonic monotonic @@ -1477,15 +1477,15 @@ define void @test90(i16* %ptr, i16 %cmp, i16 %val) { define void @test91(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test91: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB91_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB91_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB91_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB91_4: @@ -1498,15 +1498,15 @@ define void @test91(i16* %ptr, i16 %cmp, i16 %val) { define void @test92(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test92: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB92_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB92_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB92_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB92_4: @@ -1519,7 +1519,7 @@ define void @test92(i16* %ptr, i16 %cmp, i16 %val) { define void @test93(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test93: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB93_2 ; PPC64LE-NEXT: .p2align 5 @@ -1530,7 +1530,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB93_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release monotonic @@ -1539,7 +1539,7 @@ define void @test93(i16* %ptr, i16 %cmp, i16 %val) { define void @test94(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test94: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB94_2 ; PPC64LE-NEXT: .p2align 5 @@ -1550,7 +1550,7 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB94_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val syncscope("singlethread") release acquire @@ -1559,16 +1559,16 @@ define void @test94(i16* %ptr, i16 %cmp, i16 %val) { define void @test95(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test95: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB95_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB95_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB95_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB95_4: @@ -1581,16 +1581,16 @@ define void @test95(i16* %ptr, i16 %cmp, i16 %val) { define void @test96(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test96: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB96_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB96_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB96_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB96_4: @@ -1603,16 +1603,16 @@ define void @test96(i16* %ptr, i16 %cmp, i16 %val) { define void @test97(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test97: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB97_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB97_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB97_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB97_4: @@ -1625,16 +1625,16 @@ define void @test97(i16* %ptr, i16 %cmp, i16 %val) { define void @test98(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test98: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB98_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB98_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB98_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB98_4: @@ -1647,16 +1647,16 @@ define void @test98(i16* %ptr, i16 %cmp, i16 %val) { define void @test99(i16* %ptr, i16 %cmp, i16 %val) { ; PPC64LE-LABEL: test99: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB99_1: ; PPC64LE-NEXT: lharx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB99_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB99_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB99_4: @@ -1669,7 +1669,7 @@ define void @test99(i16* %ptr, i16 %cmp, i16 %val) { define void @test100(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test100: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB100_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB100_1: @@ -1679,7 +1679,7 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB100_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") monotonic monotonic @@ -1688,15 +1688,15 @@ define void @test100(i32* %ptr, i32 %cmp, i32 %val) { define void @test101(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test101: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB101_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB101_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB101_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB101_4: @@ -1709,15 +1709,15 @@ define void @test101(i32* %ptr, i32 %cmp, i32 %val) { define void @test102(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test102: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB102_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB102_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB102_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB102_4: @@ -1730,7 +1730,7 @@ define void @test102(i32* %ptr, i32 %cmp, i32 %val) { define void @test103(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test103: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB103_2 ; PPC64LE-NEXT: .p2align 5 @@ -1741,7 +1741,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB103_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release monotonic @@ -1750,7 +1750,7 @@ define void @test103(i32* %ptr, i32 %cmp, i32 %val) { define void @test104(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test104: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB104_2 ; PPC64LE-NEXT: .p2align 5 @@ -1761,7 +1761,7 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: beq 0, .LBB104_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val syncscope("singlethread") release acquire @@ -1770,16 +1770,16 @@ define void @test104(i32* %ptr, i32 %cmp, i32 %val) { define void @test105(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test105: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB105_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB105_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB105_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB105_4: @@ -1792,16 +1792,16 @@ define void @test105(i32* %ptr, i32 %cmp, i32 %val) { define void @test106(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test106: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB106_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB106_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB106_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB106_4: @@ -1814,16 +1814,16 @@ define void @test106(i32* %ptr, i32 %cmp, i32 %val) { define void @test107(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test107: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB107_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB107_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB107_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB107_4: @@ -1836,16 +1836,16 @@ define void @test107(i32* %ptr, i32 %cmp, i32 %val) { define void @test108(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test108: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB108_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB108_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB108_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB108_4: @@ -1858,16 +1858,16 @@ define void @test108(i32* %ptr, i32 %cmp, i32 %val) { define void @test109(i32* %ptr, i32 %cmp, i32 %val) { ; PPC64LE-LABEL: test109: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB109_1: ; PPC64LE-NEXT: lwarx 6, 0, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bne 0, .LBB109_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB109_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB109_4: @@ -1880,7 +1880,7 @@ define void @test109(i32* %ptr, i32 %cmp, i32 %val) { define void @test110(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test110: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: b .LBB110_2 ; PPC64LE-NEXT: .p2align 5 ; PPC64LE-NEXT: .LBB110_1: @@ -1890,7 +1890,7 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB110_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") monotonic monotonic @@ -1899,15 +1899,15 @@ define void @test110(i64* %ptr, i64 %cmp, i64 %val) { define void @test111(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test111: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB111_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB111_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB111_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB111_4: @@ -1920,15 +1920,15 @@ define void @test111(i64* %ptr, i64 %cmp, i64 %val) { define void @test112(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test112: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB112_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB112_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB112_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB112_4: @@ -1941,7 +1941,7 @@ define void @test112(i64* %ptr, i64 %cmp, i64 %val) { define void @test113(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test113: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB113_2 ; PPC64LE-NEXT: .p2align 5 @@ -1952,7 +1952,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB113_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release monotonic @@ -1961,7 +1961,7 @@ define void @test113(i64* %ptr, i64 %cmp, i64 %val) { define void @test114(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test114: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: b .LBB114_2 ; PPC64LE-NEXT: .p2align 5 @@ -1972,7 +1972,7 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: beq 0, .LBB114_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: blr %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val syncscope("singlethread") release acquire @@ -1981,16 +1981,16 @@ define void @test114(i64* %ptr, i64 %cmp, i64 %val) { define void @test115(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test115: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB115_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB115_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB115_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB115_4: @@ -2003,16 +2003,16 @@ define void @test115(i64* %ptr, i64 %cmp, i64 %val) { define void @test116(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test116: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB116_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB116_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB116_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB116_4: @@ -2025,16 +2025,16 @@ define void @test116(i64* %ptr, i64 %cmp, i64 %val) { define void @test117(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test117: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB117_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB117_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB117_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB117_4: @@ -2047,16 +2047,16 @@ define void @test117(i64* %ptr, i64 %cmp, i64 %val) { define void @test118(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test118: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB118_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB118_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB118_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB118_4: @@ -2069,16 +2069,16 @@ define void @test118(i64* %ptr, i64 %cmp, i64 %val) { define void @test119(i64* %ptr, i64 %cmp, i64 %val) { ; PPC64LE-LABEL: test119: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB119_1: ; PPC64LE-NEXT: ldarx 6, 0, 3 ; PPC64LE-NEXT: cmpd 4, 6 ; PPC64LE-NEXT: bne 0, .LBB119_4 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
5, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB119_1 -; PPC64LE-NEXT: # BB#3: +; PPC64LE-NEXT: # %bb.3: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr ; PPC64LE-NEXT: .LBB119_4: @@ -2091,12 +2091,12 @@ define void @test119(i64* %ptr, i64 %cmp, i64 %val) { define i8 @test120(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test120: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB120_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB120_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic @@ -2105,13 +2105,13 @@ define i8 @test120(i8* %ptr, i8 %val) { define i8 @test121(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test121: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB121_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB121_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val acquire @@ -2120,13 +2120,13 @@ define i8 @test121(i8* %ptr, i8 %val) { define i8 @test122(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test122: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB122_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB122_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val release @@ -2135,13 +2135,13 @@ define i8 @test122(i8* %ptr, i8 %val) { define i8 @test123(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test123: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB123_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB123_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2151,13 +2151,13 @@ define i8 @test123(i8* %ptr, i8 %val) { define i8 @test124(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test124: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB124_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB124_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2167,12 +2167,12 @@ define i8 @test124(i8* %ptr, i8 %val) { define i16 @test125(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test125: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB125_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB125_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic @@ -2181,13 +2181,13 @@ define i16 @test125(i16* %ptr, i16 %val) { define i16 @test126(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test126: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB126_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: sthcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB126_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val acquire @@ -2196,13 +2196,13 @@ define i16 @test126(i16* %ptr, i16 %val) { define i16 @test127(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test127: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB127_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB127_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val release @@ -2211,13 +2211,13 @@ define i16 @test127(i16* %ptr, i16 %val) { define i16 @test128(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test128: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB128_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB128_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2227,13 +2227,13 @@ define i16 @test128(i16* %ptr, i16 %val) { define i16 @test129(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test129: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB129_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB129_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2243,12 +2243,12 @@ define i16 @test129(i16* %ptr, i16 %val) { define i32 @test130(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test130: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB130_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB130_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic @@ -2257,13 +2257,13 @@ define i32 @test130(i32* %ptr, i32 %val) { define i32 @test131(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test131: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB131_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB131_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val acquire @@ -2272,13 +2272,13 @@ define i32 @test131(i32* %ptr, i32 %val) { define i32 @test132(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test132: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB132_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB132_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val release @@ -2287,13 +2287,13 @@ define i32 @test132(i32* %ptr, i32 %val) { define i32 @test133(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test133: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB133_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB133_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2303,13 +2303,13 @@ define i32 @test133(i32* %ptr, i32 %val) { define i32 @test134(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test134: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB134_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB134_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2319,12 +2319,12 @@ define i32 @test134(i32* %ptr, i32 %val) { define i64 @test135(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test135: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB135_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB135_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic @@ -2333,13 +2333,13 @@ define i64 @test135(i64* %ptr, i64 %val) { define i64 @test136(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test136: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB136_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB136_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val acquire @@ -2348,13 +2348,13 @@ define i64 @test136(i64* %ptr, i64 %val) { define i64 @test137(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test137: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB137_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB137_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val release @@ -2363,13 +2363,13 @@ define i64 @test137(i64* %ptr, i64 %val) { define i64 @test138(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test138: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB138_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB138_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2379,13 +2379,13 @@ define i64 @test138(i64* %ptr, i64 %val) { define i64 @test139(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test139: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB139_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB139_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2395,13 +2395,13 @@ define i64 @test139(i64* %ptr, i64 %val) { define i8 @test140(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test140: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB140_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB140_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val monotonic @@ -2410,14 +2410,14 @@ define i8 @test140(i8* %ptr, i8 %val) { define i8 @test141(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test141: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB141_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB141_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val acquire @@ -2426,14 +2426,14 @@ define i8 @test141(i8* %ptr, i8 %val) { define i8 @test142(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test142: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB142_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB142_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val release @@ -2442,14 +2442,14 @@ define i8 @test142(i8* %ptr, i8 %val) { define i8 @test143(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test143: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB143_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB143_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2459,14 +2459,14 @@ define i8 @test143(i8* %ptr, i8 %val) { define i8 @test144(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test144: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB144_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB144_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2476,13 +2476,13 @@ define i8 @test144(i8* %ptr, i8 %val) { define i16 @test145(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test145: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB145_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB145_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val monotonic @@ -2491,14 +2491,14 @@ define i16 @test145(i16* %ptr, i16 %val) { define i16 @test146(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test146: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB146_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB146_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val acquire @@ -2507,14 +2507,14 @@ define i16 @test146(i16* %ptr, i16 %val) { define i16 @test147(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test147: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB147_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB147_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val release @@ -2523,14 +2523,14 @@ define i16 @test147(i16* %ptr, i16 %val) { define i16 @test148(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test148: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB148_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB148_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2540,14 +2540,14 @@ define i16 @test148(i16* %ptr, i16 %val) { define i16 @test149(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test149: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB149_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB149_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2557,13 +2557,13 @@ define i16 @test149(i16* %ptr, i16 %val) { define i32 @test150(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test150: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB150_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB150_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val monotonic @@ -2572,14 +2572,14 @@ define i32 @test150(i32* %ptr, i32 %val) { define i32 @test151(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test151: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB151_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB151_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val acquire @@ -2588,14 +2588,14 @@ define i32 @test151(i32* %ptr, i32 %val) { define i32 @test152(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test152: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB152_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB152_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val release @@ -2604,14 +2604,14 @@ define i32 @test152(i32* %ptr, i32 %val) { define i32 @test153(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test153: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB153_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB153_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2621,14 +2621,14 @@ define i32 @test153(i32* %ptr, i32 %val) { define i32 @test154(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test154: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB154_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB154_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2638,13 +2638,13 @@ define i32 @test154(i32* %ptr, i32 %val) { define i64 @test155(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test155: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB155_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB155_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val monotonic @@ -2653,14 +2653,14 @@ define i64 @test155(i64* %ptr, i64 %val) { define i64 @test156(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test156: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB156_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB156_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val acquire @@ -2669,14 +2669,14 @@ define i64 @test156(i64* %ptr, i64 %val) { define i64 @test157(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test157: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB157_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB157_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val release @@ -2685,14 +2685,14 @@ define i64 @test157(i64* %ptr, i64 %val) { define i64 @test158(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test158: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB158_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB158_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2702,14 +2702,14 @@ define i64 @test158(i64* %ptr, i64 %val) { define i64 @test159(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test159: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB159_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB159_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2719,13 +2719,13 @@ define i64 @test159(i64* %ptr, i64 %val) { define i8 @test160(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test160: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB160_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB160_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val monotonic @@ -2734,14 +2734,14 @@ define i8 @test160(i8* %ptr, i8 %val) { define i8 @test161(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test161: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB161_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB161_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val acquire @@ -2750,14 +2750,14 @@ define i8 @test161(i8* %ptr, i8 %val) { define i8 @test162(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test162: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB162_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB162_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val release @@ -2766,14 +2766,14 @@ define i8 @test162(i8* %ptr, i8 %val) { define i8 @test163(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test163: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB163_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB163_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2783,14 +2783,14 @@ define i8 @test163(i8* %ptr, i8 %val) { define i8 @test164(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test164: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB164_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB164_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2800,13 +2800,13 @@ define i8 @test164(i8* %ptr, i8 %val) { define i16 @test165(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test165: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB165_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB165_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val monotonic @@ -2815,14 +2815,14 @@ define i16 @test165(i16* %ptr, i16 %val) { define i16 @test166(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test166: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB166_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB166_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val acquire @@ -2831,14 +2831,14 @@ define i16 @test166(i16* %ptr, i16 %val) { define i16 @test167(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test167: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB167_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB167_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val release @@ -2847,14 +2847,14 @@ define i16 @test167(i16* %ptr, i16 %val) { define i16 @test168(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test168: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB168_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB168_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2864,14 +2864,14 @@ define i16 @test168(i16* %ptr, i16 %val) { define i16 @test169(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test169: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB169_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB169_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2881,13 +2881,13 @@ define i16 @test169(i16* %ptr, i16 %val) { define i32 @test170(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test170: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB170_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB170_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val monotonic @@ -2896,14 +2896,14 @@ define i32 @test170(i32* %ptr, i32 %val) { define i32 @test171(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test171: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB171_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB171_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val acquire @@ -2912,14 +2912,14 @@ define i32 @test171(i32* %ptr, i32 %val) { define i32 @test172(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test172: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB172_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB172_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val release @@ -2928,14 +2928,14 @@ define i32 @test172(i32* %ptr, i32 %val) { define i32 @test173(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test173: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB173_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB173_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2945,14 +2945,14 @@ define i32 @test173(i32* %ptr, i32 %val) { define i32 @test174(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test174: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB174_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB174_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -2962,13 +2962,13 @@ define i32 @test174(i32* %ptr, i32 %val) { define i64 @test175(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test175: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB175_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB175_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val monotonic @@ -2977,14 +2977,14 @@ define i64 @test175(i64* %ptr, i64 %val) { define i64 @test176(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test176: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB176_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: sub 6, 3, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB176_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val acquire @@ -2993,14 +2993,14 @@ define i64 @test176(i64* %ptr, i64 %val) { define i64 @test177(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test177: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB177_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB177_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val release @@ -3009,14 +3009,14 @@ define i64 @test177(i64* %ptr, i64 %val) { define i64 @test178(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test178: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB178_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB178_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3026,14 +3026,14 @@ define i64 @test178(i64* %ptr, i64 %val) { define i64 @test179(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test179: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB179_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB179_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3043,13 +3043,13 @@ define i64 @test179(i64* %ptr, i64 %val) { define i8 @test180(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test180: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB180_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB180_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val monotonic @@ -3058,14 +3058,14 @@ define i8 @test180(i8* %ptr, i8 %val) { define i8 @test181(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test181: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB181_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB181_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val acquire @@ -3074,14 +3074,14 @@ define i8 @test181(i8* %ptr, i8 %val) { define i8 @test182(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test182: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB182_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB182_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val release @@ -3090,14 +3090,14 @@ define i8 @test182(i8* %ptr, i8 %val) { define i8 @test183(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test183: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB183_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB183_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3107,14 +3107,14 @@ define i8 @test183(i8* %ptr, i8 %val) { define i8 @test184(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test184: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB184_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB184_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3124,13 +3124,13 @@ define i8 @test184(i8* %ptr, i8 %val) { define i16 @test185(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test185: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB185_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB185_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val monotonic @@ -3139,14 +3139,14 @@ define i16 @test185(i16* %ptr, i16 %val) { define i16 @test186(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test186: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB186_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB186_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val acquire @@ -3155,14 +3155,14 @@ define i16 @test186(i16* %ptr, i16 %val) { define i16 @test187(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test187: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB187_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB187_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val release @@ -3171,14 +3171,14 @@ define i16 @test187(i16* %ptr, i16 %val) { define i16 @test188(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test188: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB188_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB188_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3188,14 +3188,14 @@ define i16 @test188(i16* %ptr, i16 %val) { define i16 @test189(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test189: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB189_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB189_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3205,13 +3205,13 @@ define i16 @test189(i16* %ptr, i16 %val) { define i32 @test190(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test190: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB190_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB190_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val monotonic @@ -3220,14 +3220,14 @@ define i32 @test190(i32* %ptr, i32 %val) { define i32 @test191(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test191: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB191_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB191_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val acquire @@ -3236,14 +3236,14 @@ define i32 @test191(i32* %ptr, i32 %val) { define i32 @test192(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test192: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB192_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB192_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val release @@ -3252,14 +3252,14 @@ define i32 @test192(i32* %ptr, i32 %val) { define i32 @test193(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test193: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB193_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB193_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3269,14 +3269,14 @@ define i32 @test193(i32* %ptr, i32 %val) { define i32 @test194(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test194: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB194_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB194_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3286,13 +3286,13 @@ define i32 @test194(i32* %ptr, i32 %val) { define i64 @test195(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test195: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB195_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB195_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val monotonic @@ -3301,14 +3301,14 @@ define i64 @test195(i64* %ptr, i64 %val) { define i64 @test196(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test196: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB196_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stdcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB196_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val acquire @@ -3317,14 +3317,14 @@ define i64 @test196(i64* %ptr, i64 %val) { define i64 @test197(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test197: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB197_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB197_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val release @@ -3333,14 +3333,14 @@ define i64 @test197(i64* %ptr, i64 %val) { define i64 @test198(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test198: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB198_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB198_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3350,14 +3350,14 @@ define i64 @test198(i64* %ptr, i64 %val) { define i64 @test199(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test199: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB199_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB199_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3367,13 +3367,13 @@ define i64 @test199(i64* %ptr, i64 %val) { define i8 @test200(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test200: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB200_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB200_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val monotonic @@ -3382,14 +3382,14 @@ define i8 @test200(i8* %ptr, i8 %val) { define i8 @test201(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test201: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB201_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB201_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val acquire @@ -3398,14 +3398,14 @@ define i8 @test201(i8* %ptr, i8 %val) { define i8 @test202(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test202: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB202_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB202_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val release @@ -3414,14 +3414,14 @@ define i8 @test202(i8* %ptr, i8 %val) { define i8 @test203(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test203: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB203_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB203_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3431,14 +3431,14 @@ define i8 @test203(i8* %ptr, i8 %val) { define i8 @test204(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test204: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB204_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB204_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3448,13 +3448,13 @@ define i8 @test204(i8* %ptr, i8 %val) { define i16 @test205(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test205: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB205_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB205_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val monotonic @@ -3463,14 +3463,14 @@ define i16 @test205(i16* %ptr, i16 %val) { define i16 @test206(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test206: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB206_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB206_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val acquire @@ -3479,14 +3479,14 @@ define i16 @test206(i16* %ptr, i16 %val) { define i16 @test207(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test207: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB207_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB207_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val release @@ -3495,14 +3495,14 @@ define i16 @test207(i16* %ptr, i16 %val) { define i16 @test208(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test208: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB208_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB208_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3512,14 +3512,14 @@ define i16 @test208(i16* %ptr, i16 %val) { define i16 @test209(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test209: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB209_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB209_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3529,13 +3529,13 @@ define i16 @test209(i16* %ptr, i16 %val) { define i32 @test210(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test210: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB210_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB210_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val monotonic @@ -3544,14 +3544,14 @@ define i32 @test210(i32* %ptr, i32 %val) { define i32 @test211(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test211: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB211_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB211_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val acquire @@ -3560,14 +3560,14 @@ define i32 @test211(i32* %ptr, i32 %val) { define i32 @test212(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test212: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB212_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB212_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val release @@ -3576,14 +3576,14 @@ define i32 @test212(i32* %ptr, i32 %val) { define i32 @test213(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test213: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB213_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB213_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3593,14 +3593,14 @@ define i32 @test213(i32* %ptr, i32 %val) { define i32 @test214(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test214: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB214_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB214_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3610,13 +3610,13 @@ define i32 @test214(i32* %ptr, i32 %val) { define i64 @test215(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test215: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB215_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB215_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val monotonic @@ -3625,14 +3625,14 @@ define i64 @test215(i64* %ptr, i64 %val) { define i64 @test216(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test216: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB216_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB216_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val acquire @@ -3641,14 +3641,14 @@ define i64 @test216(i64* %ptr, i64 %val) { define i64 @test217(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test217: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB217_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB217_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val release @@ -3657,14 +3657,14 @@ define i64 @test217(i64* %ptr, i64 %val) { define i64 @test218(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test218: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB218_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB218_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3674,14 +3674,14 @@ define i64 @test218(i64* %ptr, i64 %val) { define i64 @test219(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test219: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB219_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB219_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3691,13 +3691,13 @@ define i64 @test219(i64* %ptr, i64 %val) { define i8 @test220(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test220: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB220_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB220_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val monotonic @@ -3706,14 +3706,14 @@ define i8 @test220(i8* %ptr, i8 %val) { define i8 @test221(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test221: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB221_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB221_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val acquire @@ -3722,14 +3722,14 @@ define i8 @test221(i8* %ptr, i8 %val) { define i8 @test222(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test222: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB222_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB222_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val release @@ -3738,14 +3738,14 @@ define i8 @test222(i8* %ptr, i8 %val) { define i8 @test223(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test223: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB223_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB223_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3755,14 +3755,14 @@ define i8 @test223(i8* %ptr, i8 %val) { define i8 @test224(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test224: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB224_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB224_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3772,13 +3772,13 @@ define i8 @test224(i8* %ptr, i8 %val) { define i16 @test225(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test225: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB225_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB225_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val monotonic @@ -3787,14 +3787,14 @@ define i16 @test225(i16* %ptr, i16 %val) { define i16 @test226(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test226: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB226_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB226_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val acquire @@ -3803,14 +3803,14 @@ define i16 @test226(i16* %ptr, i16 %val) { define i16 @test227(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test227: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB227_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB227_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val release @@ -3819,14 +3819,14 @@ define i16 @test227(i16* %ptr, i16 %val) { define i16 @test228(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test228: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB228_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB228_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3836,14 +3836,14 @@ define i16 @test228(i16* %ptr, i16 %val) { define i16 @test229(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test229: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB229_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB229_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3853,13 +3853,13 @@ define i16 @test229(i16* %ptr, i16 %val) { define i32 @test230(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test230: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB230_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB230_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val monotonic @@ -3868,14 +3868,14 @@ define i32 @test230(i32* %ptr, i32 %val) { define i32 @test231(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test231: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB231_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stwcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB231_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val acquire @@ -3884,14 +3884,14 @@ define i32 @test231(i32* %ptr, i32 %val) { define i32 @test232(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test232: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB232_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB232_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val release @@ -3900,14 +3900,14 @@ define i32 @test232(i32* %ptr, i32 %val) { define i32 @test233(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test233: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB233_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB233_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3917,14 +3917,14 @@ define i32 @test233(i32* %ptr, i32 %val) { define i32 @test234(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test234: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB234_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB234_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3934,13 +3934,13 @@ define i32 @test234(i32* %ptr, i32 %val) { define i64 @test235(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test235: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB235_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB235_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val monotonic @@ -3949,14 +3949,14 @@ define i64 @test235(i64* %ptr, i64 %val) { define i64 @test236(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test236: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB236_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB236_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val acquire @@ -3965,14 +3965,14 @@ define i64 @test236(i64* %ptr, i64 %val) { define i64 @test237(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test237: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB237_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB237_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val release @@ -3981,14 +3981,14 @@ define i64 @test237(i64* %ptr, i64 %val) { define i64 @test238(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test238: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB238_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB238_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -3998,14 +3998,14 @@ define i64 @test238(i64* %ptr, i64 %val) { define i64 @test239(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test239: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB239_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB239_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4015,13 +4015,13 @@ define i64 @test239(i64* %ptr, i64 %val) { define i8 @test240(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test240: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB240_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB240_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val monotonic @@ -4030,14 +4030,14 @@ define i8 @test240(i8* %ptr, i8 %val) { define i8 @test241(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test241: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB241_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB241_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val acquire @@ -4046,14 +4046,14 @@ define i8 @test241(i8* %ptr, i8 %val) { define i8 @test242(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test242: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB242_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB242_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val release @@ -4062,14 +4062,14 @@ define i8 @test242(i8* %ptr, i8 %val) { define i8 @test243(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test243: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB243_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB243_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4079,14 +4079,14 @@ define i8 @test243(i8* %ptr, i8 %val) { define i8 @test244(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test244: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB244_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB244_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4096,13 +4096,13 @@ define i8 @test244(i8* %ptr, i8 %val) { define i16 @test245(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test245: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB245_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB245_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val monotonic @@ -4111,14 +4111,14 @@ define i16 @test245(i16* %ptr, i16 %val) { define i16 @test246(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test246: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB246_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB246_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val acquire @@ -4127,14 +4127,14 @@ define i16 @test246(i16* %ptr, i16 %val) { define i16 @test247(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test247: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB247_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB247_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val release @@ -4143,14 +4143,14 @@ define i16 @test247(i16* %ptr, i16 %val) { define i16 @test248(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test248: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB248_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB248_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4160,14 +4160,14 @@ define i16 @test248(i16* %ptr, i16 %val) { define i16 @test249(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test249: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB249_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB249_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4177,13 +4177,13 @@ define i16 @test249(i16* %ptr, i16 %val) { define i32 @test250(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test250: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB250_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB250_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val monotonic @@ -4192,14 +4192,14 @@ define i32 @test250(i32* %ptr, i32 %val) { define i32 @test251(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test251: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB251_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB251_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val acquire @@ -4208,14 +4208,14 @@ define i32 @test251(i32* %ptr, i32 %val) { define i32 @test252(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test252: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB252_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB252_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val release @@ -4224,14 +4224,14 @@ define i32 @test252(i32* %ptr, i32 %val) { define i32 @test253(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test253: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB253_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB253_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4241,14 +4241,14 @@ define i32 @test253(i32* %ptr, i32 %val) { define i32 @test254(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test254: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB254_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB254_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4258,13 +4258,13 @@ define i32 @test254(i32* %ptr, i32 %val) { define i64 @test255(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test255: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB255_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB255_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val monotonic @@ -4273,14 +4273,14 @@ define i64 @test255(i64* %ptr, i64 %val) { define i64 @test256(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test256: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB256_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB256_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val acquire @@ -4289,14 +4289,14 @@ define i64 @test256(i64* %ptr, i64 %val) { define i64 @test257(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test257: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB257_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB257_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val release @@ -4305,14 +4305,14 @@ define i64 @test257(i64* %ptr, i64 %val) { define i64 @test258(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test258: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB258_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB258_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4322,14 +4322,14 @@ define i64 @test258(i64* %ptr, i64 %val) { define i64 @test259(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test259: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB259_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB259_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -4339,13 +4339,13 @@ define i64 @test259(i64* %ptr, i64 %val) { define i8 @test260(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test260: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB260_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB260_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB260_1 ; PPC64LE-NEXT: .LBB260_3: @@ -4357,14 +4357,14 @@ define i8 @test260(i8* %ptr, i8 %val) { define i8 @test261(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test261: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB261_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB261_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB261_1 ; PPC64LE-NEXT: .LBB261_3: @@ -4376,14 +4376,14 @@ define i8 @test261(i8* %ptr, i8 %val) { define i8 @test262(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test262: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB262_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB262_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB262_1 ; PPC64LE-NEXT: .LBB262_3: @@ -4395,14 +4395,14 @@ define i8 @test262(i8* %ptr, i8 %val) { define i8 @test263(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test263: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB263_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB263_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB263_1 ; PPC64LE-NEXT: .LBB263_3: @@ -4415,14 +4415,14 @@ define i8 @test263(i8* %ptr, i8 %val) { define i8 @test264(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test264: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB264_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB264_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB264_1 ; PPC64LE-NEXT: .LBB264_3: @@ -4435,13 +4435,13 @@ define i8 @test264(i8* %ptr, i8 %val) { define i16 @test265(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test265: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB265_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB265_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB265_1 ; PPC64LE-NEXT: .LBB265_3: @@ -4453,14 +4453,14 @@ define i16 @test265(i16* %ptr, i16 %val) { define i16 @test266(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test266: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB266_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB266_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB266_1 ; PPC64LE-NEXT: .LBB266_3: @@ -4472,14 +4472,14 @@ define i16 @test266(i16* %ptr, i16 %val) { define i16 @test267(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test267: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB267_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB267_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB267_1 ; PPC64LE-NEXT: .LBB267_3: @@ -4491,14 +4491,14 @@ define i16 @test267(i16* %ptr, i16 %val) { define i16 @test268(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test268: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB268_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB268_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB268_1 ; PPC64LE-NEXT: .LBB268_3: @@ -4511,14 +4511,14 @@ define i16 @test268(i16* %ptr, i16 %val) { define i16 @test269(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test269: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB269_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB269_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB269_1 ; PPC64LE-NEXT: .LBB269_3: @@ -4531,12 +4531,12 @@ define i16 @test269(i16* %ptr, i16 %val) { define i32 @test270(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test270: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB270_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB270_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB270_1 ; PPC64LE-NEXT: .LBB270_3: @@ -4548,13 +4548,13 @@ define i32 @test270(i32* %ptr, i32 %val) { define i32 @test271(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test271: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB271_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB271_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB271_1 ; PPC64LE-NEXT: .LBB271_3: @@ -4566,13 +4566,13 @@ define i32 @test271(i32* %ptr, i32 %val) { define i32 @test272(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test272: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB272_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB272_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB272_1 ; PPC64LE-NEXT: .LBB272_3: @@ -4584,13 +4584,13 @@ define i32 @test272(i32* %ptr, i32 %val) { define i32 @test273(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test273: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB273_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB273_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB273_1 ; PPC64LE-NEXT: .LBB273_3: @@ -4603,13 +4603,13 @@ define i32 @test273(i32* %ptr, i32 %val) { define i32 @test274(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test274: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB274_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB274_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB274_1 ; PPC64LE-NEXT: .LBB274_3: @@ -4622,12 +4622,12 @@ define i32 @test274(i32* %ptr, i32 %val) { define i64 @test275(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test275: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB275_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB275_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB275_1 ; PPC64LE-NEXT: .LBB275_3: @@ -4639,13 +4639,13 @@ define i64 @test275(i64* %ptr, i64 %val) { define i64 @test276(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test276: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB276_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: ble 0, .LBB276_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB276_1 ; PPC64LE-NEXT: .LBB276_3: @@ -4657,13 +4657,13 @@ define i64 @test276(i64* %ptr, i64 %val) { define i64 @test277(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test277: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB277_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB277_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB277_1 ; PPC64LE-NEXT: .LBB277_3: @@ -4675,13 +4675,13 @@ define i64 @test277(i64* %ptr, i64 %val) { define i64 @test278(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test278: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB278_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB278_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB278_1 ; PPC64LE-NEXT: .LBB278_3: @@ -4694,13 +4694,13 @@ define i64 @test278(i64* %ptr, i64 %val) { define i64 @test279(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test279: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB279_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB279_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB279_1 ; PPC64LE-NEXT: .LBB279_3: @@ -4713,13 +4713,13 @@ define i64 @test279(i64* %ptr, i64 %val) { define i8 @test280(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test280: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB280_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB280_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB280_1 ; PPC64LE-NEXT: .LBB280_3: @@ -4731,14 +4731,14 @@ define i8 @test280(i8* %ptr, i8 %val) { define i8 @test281(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test281: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB281_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB281_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB281_1 ; PPC64LE-NEXT: .LBB281_3: @@ -4750,14 +4750,14 @@ define i8 @test281(i8* %ptr, i8 %val) { define i8 @test282(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test282: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB282_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB282_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB282_1 ; PPC64LE-NEXT: .LBB282_3: @@ -4769,14 +4769,14 @@ define i8 @test282(i8* %ptr, i8 %val) { define i8 @test283(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test283: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB283_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB283_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB283_1 ; PPC64LE-NEXT: .LBB283_3: @@ -4789,14 +4789,14 @@ define i8 @test283(i8* %ptr, i8 %val) { define i8 @test284(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test284: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB284_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB284_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB284_1 ; PPC64LE-NEXT: .LBB284_3: @@ -4809,13 +4809,13 @@ define i8 @test284(i8* %ptr, i8 %val) { define i16 @test285(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test285: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB285_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB285_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB285_1 ; PPC64LE-NEXT: .LBB285_3: @@ -4827,14 +4827,14 @@ define i16 @test285(i16* %ptr, i16 %val) { define i16 @test286(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test286: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB286_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB286_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB286_1 ; PPC64LE-NEXT: .LBB286_3: @@ -4846,14 +4846,14 @@ define i16 @test286(i16* %ptr, i16 %val) { define i16 @test287(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test287: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB287_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB287_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB287_1 ; PPC64LE-NEXT: .LBB287_3: @@ -4865,14 +4865,14 @@ define i16 @test287(i16* %ptr, i16 %val) { define i16 @test288(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test288: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB288_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB288_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB288_1 ; PPC64LE-NEXT: .LBB288_3: @@ -4885,14 +4885,14 @@ define i16 @test288(i16* %ptr, i16 %val) { define i16 @test289(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test289: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB289_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB289_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB289_1 ; PPC64LE-NEXT: .LBB289_3: @@ -4905,12 +4905,12 @@ define i16 @test289(i16* %ptr, i16 %val) { define i32 @test290(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test290: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB290_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB290_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB290_1 ; PPC64LE-NEXT: .LBB290_3: @@ -4922,13 +4922,13 @@ define i32 @test290(i32* %ptr, i32 %val) { define i32 @test291(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test291: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB291_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB291_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB291_1 ; PPC64LE-NEXT: .LBB291_3: @@ -4940,13 +4940,13 @@ define i32 @test291(i32* %ptr, i32 %val) { define i32 @test292(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test292: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB292_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB292_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB292_1 ; PPC64LE-NEXT: .LBB292_3: @@ -4958,13 +4958,13 @@ define i32 @test292(i32* %ptr, i32 %val) { define i32 @test293(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test293: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB293_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB293_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB293_1 ; PPC64LE-NEXT: .LBB293_3: @@ -4977,13 +4977,13 @@ define i32 @test293(i32* %ptr, i32 %val) { define i32 @test294(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test294: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB294_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB294_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB294_1 ; PPC64LE-NEXT: .LBB294_3: @@ -4996,12 +4996,12 @@ define i32 @test294(i32* %ptr, i32 %val) { define i64 @test295(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test295: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB295_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB295_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB295_1 ; PPC64LE-NEXT: .LBB295_3: @@ -5013,13 +5013,13 @@ define i64 @test295(i64* %ptr, i64 %val) { define i64 @test296(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test296: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB296_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: bge 0, .LBB296_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB296_1 ; PPC64LE-NEXT: .LBB296_3: @@ -5031,13 +5031,13 @@ define i64 @test296(i64* %ptr, i64 %val) { define i64 @test297(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test297: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB297_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB297_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB297_1 ; PPC64LE-NEXT: .LBB297_3: @@ -5049,13 +5049,13 @@ define i64 @test297(i64* %ptr, i64 %val) { define i64 @test298(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test298: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB298_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB298_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB298_1 ; PPC64LE-NEXT: .LBB298_3: @@ -5068,13 +5068,13 @@ define i64 @test298(i64* %ptr, i64 %val) { define i64 @test299(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test299: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB299_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB299_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB299_1 ; PPC64LE-NEXT: .LBB299_3: @@ -5087,12 +5087,12 @@ define i64 @test299(i64* %ptr, i64 %val) { define i8 @test300(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test300: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB300_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB300_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB300_1 ; PPC64LE-NEXT: .LBB300_3: @@ -5104,13 +5104,13 @@ define i8 @test300(i8* %ptr, i8 %val) { define i8 @test301(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test301: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB301_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB301_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB301_1 ; PPC64LE-NEXT: .LBB301_3: @@ -5122,13 +5122,13 @@ define i8 @test301(i8* %ptr, i8 %val) { define i8 @test302(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test302: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB302_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB302_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB302_1 ; PPC64LE-NEXT: .LBB302_3: @@ -5140,13 +5140,13 @@ define i8 @test302(i8* %ptr, i8 %val) { define i8 @test303(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test303: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB303_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB303_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB303_1 ; PPC64LE-NEXT: .LBB303_3: @@ -5159,13 +5159,13 @@ define i8 @test303(i8* %ptr, i8 %val) { define i8 @test304(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test304: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB304_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB304_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB304_1 ; PPC64LE-NEXT: .LBB304_3: @@ -5178,12 +5178,12 @@ define i8 @test304(i8* %ptr, i8 %val) { define i16 @test305(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test305: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB305_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB305_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB305_1 ; PPC64LE-NEXT: .LBB305_3: @@ -5195,13 +5195,13 @@ define i16 @test305(i16* %ptr, i16 %val) { define i16 @test306(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test306: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB306_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB306_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB306_1 ; PPC64LE-NEXT: .LBB306_3: @@ -5213,13 +5213,13 @@ define i16 @test306(i16* %ptr, i16 %val) { define i16 @test307(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test307: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB307_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB307_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB307_1 ; PPC64LE-NEXT: .LBB307_3: @@ -5231,13 +5231,13 @@ define i16 @test307(i16* %ptr, i16 %val) { define i16 @test308(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test308: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB308_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB308_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB308_1 ; PPC64LE-NEXT: .LBB308_3: @@ -5250,13 +5250,13 @@ define i16 @test308(i16* %ptr, i16 %val) { define i16 @test309(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test309: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB309_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB309_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB309_1 ; PPC64LE-NEXT: .LBB309_3: @@ -5269,12 +5269,12 @@ define i16 @test309(i16* %ptr, i16 %val) { define i32 @test310(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test310: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB310_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB310_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB310_1 ; PPC64LE-NEXT: .LBB310_3: @@ -5286,13 +5286,13 @@ define i32 @test310(i32* %ptr, i32 %val) { define i32 @test311(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test311: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB311_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB311_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB311_1 ; PPC64LE-NEXT: .LBB311_3: @@ -5304,13 +5304,13 @@ define i32 @test311(i32* %ptr, i32 %val) { define i32 @test312(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test312: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB312_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB312_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB312_1 ; PPC64LE-NEXT: .LBB312_3: @@ -5322,13 +5322,13 @@ define i32 @test312(i32* %ptr, i32 %val) { define i32 @test313(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test313: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB313_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB313_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB313_1 ; PPC64LE-NEXT: .LBB313_3: @@ -5341,13 +5341,13 @@ define i32 @test313(i32* %ptr, i32 %val) { define i32 @test314(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test314: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB314_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB314_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB314_1 ; PPC64LE-NEXT: .LBB314_3: @@ -5360,12 +5360,12 @@ define i32 @test314(i32* %ptr, i32 %val) { define i64 @test315(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test315: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB315_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB315_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB315_1 ; PPC64LE-NEXT: .LBB315_3: @@ -5377,13 +5377,13 @@ define i64 @test315(i64* %ptr, i64 %val) { define i64 @test316(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test316: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB316_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: ble 0, .LBB316_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB316_1 ; PPC64LE-NEXT: .LBB316_3: @@ -5395,13 +5395,13 @@ define i64 @test316(i64* %ptr, i64 %val) { define i64 @test317(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test317: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB317_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB317_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB317_1 ; PPC64LE-NEXT: .LBB317_3: @@ -5413,13 +5413,13 @@ define i64 @test317(i64* %ptr, i64 %val) { define i64 @test318(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test318: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB318_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB318_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB318_1 ; PPC64LE-NEXT: .LBB318_3: @@ -5432,13 +5432,13 @@ define i64 @test318(i64* %ptr, i64 %val) { define i64 @test319(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test319: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB319_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB319_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB319_1 ; PPC64LE-NEXT: .LBB319_3: @@ -5451,12 +5451,12 @@ define i64 @test319(i64* %ptr, i64 %val) { define i8 @test320(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test320: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB320_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB320_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB320_1 ; PPC64LE-NEXT: .LBB320_3: @@ -5468,13 +5468,13 @@ define i8 @test320(i8* %ptr, i8 %val) { define i8 @test321(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test321: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB321_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB321_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB321_1 ; PPC64LE-NEXT: .LBB321_3: @@ -5486,13 +5486,13 @@ define i8 @test321(i8* %ptr, i8 %val) { define i8 @test322(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test322: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB322_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB322_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB322_1 ; PPC64LE-NEXT: .LBB322_3: @@ -5504,13 +5504,13 @@ define i8 @test322(i8* %ptr, i8 %val) { define i8 @test323(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test323: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB323_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB323_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB323_1 ; PPC64LE-NEXT: .LBB323_3: @@ -5523,13 +5523,13 @@ define i8 @test323(i8* %ptr, i8 %val) { define i8 @test324(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test324: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB324_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB324_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB324_1 ; PPC64LE-NEXT: .LBB324_3: @@ -5542,12 +5542,12 @@ define i8 @test324(i8* %ptr, i8 %val) { define i16 @test325(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test325: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB325_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB325_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB325_1 ; PPC64LE-NEXT: .LBB325_3: @@ -5559,13 +5559,13 @@ define i16 @test325(i16* %ptr, i16 %val) { define i16 @test326(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test326: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB326_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB326_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB326_1 ; PPC64LE-NEXT: .LBB326_3: @@ -5577,13 +5577,13 @@ define i16 @test326(i16* %ptr, i16 %val) { define i16 @test327(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test327: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB327_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB327_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB327_1 ; PPC64LE-NEXT: .LBB327_3: @@ -5595,13 +5595,13 @@ define i16 @test327(i16* %ptr, i16 %val) { define i16 @test328(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test328: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB328_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB328_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB328_1 ; PPC64LE-NEXT: .LBB328_3: @@ -5614,13 +5614,13 @@ define i16 @test328(i16* %ptr, i16 %val) { define i16 @test329(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test329: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB329_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB329_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB329_1 ; PPC64LE-NEXT: .LBB329_3: @@ -5633,12 +5633,12 @@ define i16 @test329(i16* %ptr, i16 %val) { define i32 @test330(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test330: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB330_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB330_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB330_1 ; PPC64LE-NEXT: .LBB330_3: @@ -5650,13 +5650,13 @@ define i32 @test330(i32* %ptr, i32 %val) { define i32 @test331(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test331: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB331_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB331_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB331_1 ; PPC64LE-NEXT: .LBB331_3: @@ -5668,13 +5668,13 @@ define i32 @test331(i32* %ptr, i32 %val) { define i32 @test332(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test332: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB332_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB332_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB332_1 ; PPC64LE-NEXT: .LBB332_3: @@ -5686,13 +5686,13 @@ define i32 @test332(i32* %ptr, i32 %val) { define i32 @test333(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test333: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB333_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB333_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB333_1 ; PPC64LE-NEXT: .LBB333_3: @@ -5705,13 +5705,13 @@ define i32 @test333(i32* %ptr, i32 %val) { define i32 @test334(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test334: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB334_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB334_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB334_1 ; PPC64LE-NEXT: .LBB334_3: @@ -5724,12 +5724,12 @@ define i32 @test334(i32* %ptr, i32 %val) { define i64 @test335(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test335: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB335_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB335_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB335_1 ; PPC64LE-NEXT: .LBB335_3: @@ -5741,13 +5741,13 @@ define i64 @test335(i64* %ptr, i64 %val) { define i64 @test336(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test336: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB336_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: bge 0, .LBB336_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB336_1 ; PPC64LE-NEXT: .LBB336_3: @@ -5759,13 +5759,13 @@ define i64 @test336(i64* %ptr, i64 %val) { define i64 @test337(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test337: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB337_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB337_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB337_1 ; PPC64LE-NEXT: .LBB337_3: @@ -5777,13 +5777,13 @@ define i64 @test337(i64* %ptr, i64 %val) { define i64 @test338(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test338: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB338_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB338_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB338_1 ; PPC64LE-NEXT: .LBB338_3: @@ -5796,13 +5796,13 @@ define i64 @test338(i64* %ptr, i64 %val) { define i64 @test339(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test339: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB339_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB339_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB339_1 ; PPC64LE-NEXT: .LBB339_3: @@ -5815,12 +5815,12 @@ define i64 @test339(i64* %ptr, i64 %val) { define i8 @test340(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test340: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB340_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB340_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -5829,13 +5829,13 @@ define i8 @test340(i8* %ptr, i8 %val) { define i8 @test341(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test341: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB341_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB341_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -5844,13 +5844,13 @@ define i8 @test341(i8* %ptr, i8 %val) { define i8 @test342(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test342: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB342_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB342_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i8* %ptr, i8 %val syncscope("singlethread") release @@ -5859,13 +5859,13 @@ define i8 @test342(i8* %ptr, i8 %val) { define i8 @test343(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test343: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB343_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB343_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5875,13 +5875,13 @@ define i8 @test343(i8* %ptr, i8 %val) { define i8 @test344(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test344: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB344_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB344_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5891,12 +5891,12 @@ define i8 @test344(i8* %ptr, i8 %val) { define i16 @test345(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test345: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB345_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB345_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -5905,13 +5905,13 @@ define i16 @test345(i16* %ptr, i16 %val) { define i16 @test346(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test346: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB346_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB346_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -5920,13 +5920,13 @@ define i16 @test346(i16* %ptr, i16 %val) { define i16 @test347(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test347: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB347_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB347_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i16* %ptr, i16 %val syncscope("singlethread") release @@ -5935,13 +5935,13 @@ define i16 @test347(i16* %ptr, i16 %val) { define i16 @test348(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test348: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB348_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB348_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5951,13 +5951,13 @@ define i16 @test348(i16* %ptr, i16 %val) { define i16 @test349(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test349: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB349_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB349_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -5967,12 +5967,12 @@ define i16 @test349(i16* %ptr, i16 %val) { define i32 @test350(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test350: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB350_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB350_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -5981,13 +5981,13 @@ define i32 @test350(i32* %ptr, i32 %val) { define i32 @test351(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test351: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB351_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB351_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -5996,13 +5996,13 @@ define i32 @test351(i32* %ptr, i32 %val) { define i32 @test352(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test352: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB352_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB352_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i32* %ptr, i32 %val syncscope("singlethread") release @@ -6011,13 +6011,13 @@ define i32 @test352(i32* %ptr, i32 %val) { define i32 @test353(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test353: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB353_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB353_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6027,13 +6027,13 @@ define i32 @test353(i32* %ptr, i32 %val) { define i32 @test354(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test354: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB354_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB354_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6043,12 +6043,12 @@ define i32 @test354(i32* %ptr, i32 %val) { define i64 @test355(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test355: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB355_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB355_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6057,13 +6057,13 @@ define i64 @test355(i64* %ptr, i64 %val) { define i64 @test356(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test356: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB356_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB356_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6072,13 +6072,13 @@ define i64 @test356(i64* %ptr, i64 %val) { define i64 @test357(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test357: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB357_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB357_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xchg i64* %ptr, i64 %val syncscope("singlethread") release @@ -6087,13 +6087,13 @@ define i64 @test357(i64* %ptr, i64 %val) { define i64 @test358(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test358: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB358_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB358_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6103,13 +6103,13 @@ define i64 @test358(i64* %ptr, i64 %val) { define i64 @test359(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test359: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB359_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB359_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6119,13 +6119,13 @@ define i64 @test359(i64* %ptr, i64 %val) { define i8 @test360(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test360: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB360_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB360_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6134,14 +6134,14 @@ define i8 @test360(i8* %ptr, i8 %val) { define i8 @test361(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test361: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB361_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB361_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6150,14 +6150,14 @@ define i8 @test361(i8* %ptr, i8 %val) { define i8 @test362(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test362: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB362_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB362_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i8* %ptr, i8 %val syncscope("singlethread") release @@ -6166,14 +6166,14 @@ define i8 @test362(i8* %ptr, i8 %val) { define i8 @test363(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test363: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB363_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB363_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6183,14 +6183,14 @@ define i8 @test363(i8* %ptr, i8 %val) { define i8 @test364(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test364: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB364_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB364_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6200,13 +6200,13 @@ define i8 @test364(i8* %ptr, i8 %val) { define i16 @test365(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test365: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB365_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB365_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6215,14 +6215,14 @@ define i16 @test365(i16* %ptr, i16 %val) { define i16 @test366(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test366: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB366_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB366_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6231,14 +6231,14 @@ define i16 @test366(i16* %ptr, i16 %val) { define i16 @test367(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test367: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB367_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB367_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i16* %ptr, i16 %val syncscope("singlethread") release @@ -6247,14 +6247,14 @@ define i16 @test367(i16* %ptr, i16 %val) { define i16 @test368(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test368: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB368_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB368_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6264,14 +6264,14 @@ define i16 @test368(i16* %ptr, i16 %val) { define i16 @test369(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test369: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB369_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB369_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6281,13 +6281,13 @@ define i16 @test369(i16* %ptr, i16 %val) { define i32 @test370(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test370: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB370_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB370_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6296,14 +6296,14 @@ define i32 @test370(i32* %ptr, i32 %val) { define i32 @test371(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test371: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB371_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stwcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB371_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6312,14 +6312,14 @@ define i32 @test371(i32* %ptr, i32 %val) { define i32 @test372(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test372: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB372_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB372_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i32* %ptr, i32 %val syncscope("singlethread") release @@ -6328,14 +6328,14 @@ define i32 @test372(i32* %ptr, i32 %val) { define i32 @test373(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test373: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB373_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB373_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6345,14 +6345,14 @@ define i32 @test373(i32* %ptr, i32 %val) { define i32 @test374(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test374: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB374_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB374_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6362,13 +6362,13 @@ define i32 @test374(i32* %ptr, i32 %val) { define i64 @test375(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test375: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB375_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB375_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6377,14 +6377,14 @@ define i64 @test375(i64* %ptr, i64 %val) { define i64 @test376(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test376: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB376_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: add 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB376_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6393,14 +6393,14 @@ define i64 @test376(i64* %ptr, i64 %val) { define i64 @test377(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test377: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB377_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB377_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw add i64* %ptr, i64 %val syncscope("singlethread") release @@ -6409,14 +6409,14 @@ define i64 @test377(i64* %ptr, i64 %val) { define i64 @test378(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test378: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB378_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB378_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6426,14 +6426,14 @@ define i64 @test378(i64* %ptr, i64 %val) { define i64 @test379(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test379: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB379_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: add 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB379_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6443,13 +6443,13 @@ define i64 @test379(i64* %ptr, i64 %val) { define i8 @test380(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test380: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB380_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB380_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6458,14 +6458,14 @@ define i8 @test380(i8* %ptr, i8 %val) { define i8 @test381(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test381: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB381_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB381_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6474,14 +6474,14 @@ define i8 @test381(i8* %ptr, i8 %val) { define i8 @test382(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test382: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB382_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB382_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i8* %ptr, i8 %val syncscope("singlethread") release @@ -6490,14 +6490,14 @@ define i8 @test382(i8* %ptr, i8 %val) { define i8 @test383(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test383: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB383_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB383_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6507,14 +6507,14 @@ define i8 @test383(i8* %ptr, i8 %val) { define i8 @test384(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test384: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB384_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB384_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6524,13 +6524,13 @@ define i8 @test384(i8* %ptr, i8 %val) { define i16 @test385(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test385: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB385_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB385_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6539,14 +6539,14 @@ define i16 @test385(i16* %ptr, i16 %val) { define i16 @test386(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test386: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB386_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB386_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6555,14 +6555,14 @@ define i16 @test386(i16* %ptr, i16 %val) { define i16 @test387(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test387: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB387_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB387_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i16* %ptr, i16 %val syncscope("singlethread") release @@ -6571,14 +6571,14 @@ define i16 @test387(i16* %ptr, i16 %val) { define i16 @test388(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test388: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB388_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB388_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6588,14 +6588,14 @@ define i16 @test388(i16* %ptr, i16 %val) { define i16 @test389(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test389: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB389_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB389_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6605,13 +6605,13 @@ define i16 @test389(i16* %ptr, i16 %val) { define i32 @test390(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test390: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB390_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB390_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6620,14 +6620,14 @@ define i32 @test390(i32* %ptr, i32 %val) { define i32 @test391(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test391: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB391_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: subf 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB391_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6636,14 +6636,14 @@ define i32 @test391(i32* %ptr, i32 %val) { define i32 @test392(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test392: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB392_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB392_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i32* %ptr, i32 %val syncscope("singlethread") release @@ -6652,14 +6652,14 @@ define i32 @test392(i32* %ptr, i32 %val) { define i32 @test393(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test393: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB393_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB393_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6669,14 +6669,14 @@ define i32 @test393(i32* %ptr, i32 %val) { define i32 @test394(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test394: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB394_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: subf 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB394_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6686,13 +6686,13 @@ define i32 @test394(i32* %ptr, i32 %val) { define i64 @test395(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test395: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB395_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB395_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -6701,14 +6701,14 @@ define i64 @test395(i64* %ptr, i64 %val) { define i64 @test396(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test396: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB396_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: sub 6, 3, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB396_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -6717,14 +6717,14 @@ define i64 @test396(i64* %ptr, i64 %val) { define i64 @test397(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test397: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB397_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB397_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw sub i64* %ptr, i64 %val syncscope("singlethread") release @@ -6733,14 +6733,14 @@ define i64 @test397(i64* %ptr, i64 %val) { define i64 @test398(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test398: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB398_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB398_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6750,14 +6750,14 @@ define i64 @test398(i64* %ptr, i64 %val) { define i64 @test399(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test399: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB399_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: sub 6, 5, 4 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB399_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6767,13 +6767,13 @@ define i64 @test399(i64* %ptr, i64 %val) { define i8 @test400(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test400: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB400_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB400_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -6782,14 +6782,14 @@ define i8 @test400(i8* %ptr, i8 %val) { define i8 @test401(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test401: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB401_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB401_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -6798,14 +6798,14 @@ define i8 @test401(i8* %ptr, i8 %val) { define i8 @test402(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test402: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB402_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB402_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i8* %ptr, i8 %val syncscope("singlethread") release @@ -6814,14 +6814,14 @@ define i8 @test402(i8* %ptr, i8 %val) { define i8 @test403(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test403: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB403_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB403_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6831,14 +6831,14 @@ define i8 @test403(i8* %ptr, i8 %val) { define i8 @test404(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test404: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB404_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB404_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6848,13 +6848,13 @@ define i8 @test404(i8* %ptr, i8 %val) { define i16 @test405(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test405: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB405_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB405_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -6863,14 +6863,14 @@ define i16 @test405(i16* %ptr, i16 %val) { define i16 @test406(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test406: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB406_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: sthcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB406_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -6879,14 +6879,14 @@ define i16 @test406(i16* %ptr, i16 %val) { define i16 @test407(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test407: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB407_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB407_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i16* %ptr, i16 %val syncscope("singlethread") release @@ -6895,14 +6895,14 @@ define i16 @test407(i16* %ptr, i16 %val) { define i16 @test408(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test408: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB408_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB408_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6912,14 +6912,14 @@ define i16 @test408(i16* %ptr, i16 %val) { define i16 @test409(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test409: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB409_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB409_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6929,13 +6929,13 @@ define i16 @test409(i16* %ptr, i16 %val) { define i32 @test410(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test410: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB410_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB410_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -6944,14 +6944,14 @@ define i32 @test410(i32* %ptr, i32 %val) { define i32 @test411(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test411: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB411_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB411_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -6960,14 +6960,14 @@ define i32 @test411(i32* %ptr, i32 %val) { define i32 @test412(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test412: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB412_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB412_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i32* %ptr, i32 %val syncscope("singlethread") release @@ -6976,14 +6976,14 @@ define i32 @test412(i32* %ptr, i32 %val) { define i32 @test413(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test413: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB413_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB413_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -6993,14 +6993,14 @@ define i32 @test413(i32* %ptr, i32 %val) { define i32 @test414(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test414: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB414_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB414_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7010,13 +7010,13 @@ define i32 @test414(i32* %ptr, i32 %val) { define i64 @test415(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test415: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB415_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB415_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7025,14 +7025,14 @@ define i64 @test415(i64* %ptr, i64 %val) { define i64 @test416(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test416: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB416_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: and 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB416_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7041,14 +7041,14 @@ define i64 @test416(i64* %ptr, i64 %val) { define i64 @test417(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test417: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB417_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB417_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw and i64* %ptr, i64 %val syncscope("singlethread") release @@ -7057,14 +7057,14 @@ define i64 @test417(i64* %ptr, i64 %val) { define i64 @test418(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test418: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB418_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB418_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7074,14 +7074,14 @@ define i64 @test418(i64* %ptr, i64 %val) { define i64 @test419(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test419: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB419_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: and 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB419_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7091,13 +7091,13 @@ define i64 @test419(i64* %ptr, i64 %val) { define i8 @test420(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test420: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB420_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB420_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7106,14 +7106,14 @@ define i8 @test420(i8* %ptr, i8 %val) { define i8 @test421(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test421: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB421_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB421_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7122,14 +7122,14 @@ define i8 @test421(i8* %ptr, i8 %val) { define i8 @test422(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test422: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB422_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB422_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i8* %ptr, i8 %val syncscope("singlethread") release @@ -7138,14 +7138,14 @@ define i8 @test422(i8* %ptr, i8 %val) { define i8 @test423(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test423: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB423_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB423_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7155,14 +7155,14 @@ define i8 @test423(i8* %ptr, i8 %val) { define i8 @test424(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test424: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB424_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB424_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7172,13 +7172,13 @@ define i8 @test424(i8* %ptr, i8 %val) { define i16 @test425(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test425: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB425_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB425_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7187,14 +7187,14 @@ define i16 @test425(i16* %ptr, i16 %val) { define i16 @test426(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test426: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB426_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB426_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7203,14 +7203,14 @@ define i16 @test426(i16* %ptr, i16 %val) { define i16 @test427(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test427: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB427_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB427_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i16* %ptr, i16 %val syncscope("singlethread") release @@ -7219,14 +7219,14 @@ define i16 @test427(i16* %ptr, i16 %val) { define i16 @test428(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test428: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB428_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB428_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7236,14 +7236,14 @@ define i16 @test428(i16* %ptr, i16 %val) { define i16 @test429(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test429: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB429_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB429_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7253,13 +7253,13 @@ define i16 @test429(i16* %ptr, i16 %val) { define i32 @test430(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test430: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB430_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB430_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7268,14 +7268,14 @@ define i32 @test430(i32* %ptr, i32 %val) { define i32 @test431(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test431: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB431_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB431_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7284,14 +7284,14 @@ define i32 @test431(i32* %ptr, i32 %val) { define i32 @test432(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test432: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB432_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB432_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i32* %ptr, i32 %val syncscope("singlethread") release @@ -7300,14 +7300,14 @@ define i32 @test432(i32* %ptr, i32 %val) { define i32 @test433(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test433: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB433_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB433_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7317,14 +7317,14 @@ define i32 @test433(i32* %ptr, i32 %val) { define i32 @test434(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test434: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB434_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stwcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB434_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7334,13 +7334,13 @@ define i32 @test434(i32* %ptr, i32 %val) { define i64 @test435(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test435: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB435_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB435_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7349,14 +7349,14 @@ define i64 @test435(i64* %ptr, i64 %val) { define i64 @test436(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test436: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB436_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: nand 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB436_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7365,14 +7365,14 @@ define i64 @test436(i64* %ptr, i64 %val) { define i64 @test437(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test437: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB437_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB437_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw nand i64* %ptr, i64 %val syncscope("singlethread") release @@ -7381,14 +7381,14 @@ define i64 @test437(i64* %ptr, i64 %val) { define i64 @test438(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test438: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB438_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB438_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7398,14 +7398,14 @@ define i64 @test438(i64* %ptr, i64 %val) { define i64 @test439(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test439: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB439_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: nand 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB439_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7415,13 +7415,13 @@ define i64 @test439(i64* %ptr, i64 %val) { define i8 @test440(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test440: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB440_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB440_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7430,14 +7430,14 @@ define i8 @test440(i8* %ptr, i8 %val) { define i8 @test441(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test441: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB441_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stbcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB441_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7446,14 +7446,14 @@ define i8 @test441(i8* %ptr, i8 %val) { define i8 @test442(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test442: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB442_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB442_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i8* %ptr, i8 %val syncscope("singlethread") release @@ -7462,14 +7462,14 @@ define i8 @test442(i8* %ptr, i8 %val) { define i8 @test443(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test443: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB443_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB443_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7479,14 +7479,14 @@ define i8 @test443(i8* %ptr, i8 %val) { define i8 @test444(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test444: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB444_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB444_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7496,13 +7496,13 @@ define i8 @test444(i8* %ptr, i8 %val) { define i16 @test445(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test445: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB445_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB445_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7511,14 +7511,14 @@ define i16 @test445(i16* %ptr, i16 %val) { define i16 @test446(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test446: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB446_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB446_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7527,14 +7527,14 @@ define i16 @test446(i16* %ptr, i16 %val) { define i16 @test447(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test447: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB447_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB447_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i16* %ptr, i16 %val syncscope("singlethread") release @@ -7543,14 +7543,14 @@ define i16 @test447(i16* %ptr, i16 %val) { define i16 @test448(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test448: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB448_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB448_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7560,14 +7560,14 @@ define i16 @test448(i16* %ptr, i16 %val) { define i16 @test449(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test449: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB449_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB449_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7577,13 +7577,13 @@ define i16 @test449(i16* %ptr, i16 %val) { define i32 @test450(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test450: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB450_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB450_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7592,14 +7592,14 @@ define i32 @test450(i32* %ptr, i32 %val) { define i32 @test451(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test451: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB451_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB451_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7608,14 +7608,14 @@ define i32 @test451(i32* %ptr, i32 %val) { define i32 @test452(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test452: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB452_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB452_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i32* %ptr, i32 %val syncscope("singlethread") release @@ -7624,14 +7624,14 @@ define i32 @test452(i32* %ptr, i32 %val) { define i32 @test453(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test453: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB453_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB453_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7641,14 +7641,14 @@ define i32 @test453(i32* %ptr, i32 %val) { define i32 @test454(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test454: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB454_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB454_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7658,13 +7658,13 @@ define i32 @test454(i32* %ptr, i32 %val) { define i64 @test455(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test455: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB455_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB455_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7673,14 +7673,14 @@ define i64 @test455(i64* %ptr, i64 %val) { define i64 @test456(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test456: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB456_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: or 6, 4, 3 ; PPC64LE-NEXT: stdcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB456_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -7689,14 +7689,14 @@ define i64 @test456(i64* %ptr, i64 %val) { define i64 @test457(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test457: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB457_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB457_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw or i64* %ptr, i64 %val syncscope("singlethread") release @@ -7705,14 +7705,14 @@ define i64 @test457(i64* %ptr, i64 %val) { define i64 @test458(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test458: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB458_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB458_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7722,14 +7722,14 @@ define i64 @test458(i64* %ptr, i64 %val) { define i64 @test459(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test459: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB459_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: or 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB459_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7739,13 +7739,13 @@ define i64 @test459(i64* %ptr, i64 %val) { define i8 @test460(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test460: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB460_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB460_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") monotonic @@ -7754,14 +7754,14 @@ define i8 @test460(i8* %ptr, i8 %val) { define i8 @test461(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test461: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB461_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stbcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB461_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") acquire @@ -7770,14 +7770,14 @@ define i8 @test461(i8* %ptr, i8 %val) { define i8 @test462(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test462: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB462_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB462_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i8* %ptr, i8 %val syncscope("singlethread") release @@ -7786,14 +7786,14 @@ define i8 @test462(i8* %ptr, i8 %val) { define i8 @test463(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test463: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB463_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB463_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7803,14 +7803,14 @@ define i8 @test463(i8* %ptr, i8 %val) { define i8 @test464(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test464: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB464_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stbcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB464_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7820,13 +7820,13 @@ define i8 @test464(i8* %ptr, i8 %val) { define i16 @test465(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test465: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB465_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB465_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") monotonic @@ -7835,14 +7835,14 @@ define i16 @test465(i16* %ptr, i16 %val) { define i16 @test466(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test466: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB466_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: sthcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB466_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") acquire @@ -7851,14 +7851,14 @@ define i16 @test466(i16* %ptr, i16 %val) { define i16 @test467(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test467: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB467_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB467_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i16* %ptr, i16 %val syncscope("singlethread") release @@ -7867,14 +7867,14 @@ define i16 @test467(i16* %ptr, i16 %val) { define i16 @test468(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test468: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB468_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB468_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7884,14 +7884,14 @@ define i16 @test468(i16* %ptr, i16 %val) { define i16 @test469(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test469: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB469_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: sthcx. 
6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB469_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7901,13 +7901,13 @@ define i16 @test469(i16* %ptr, i16 %val) { define i32 @test470(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test470: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB470_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB470_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") monotonic @@ -7916,14 +7916,14 @@ define i32 @test470(i32* %ptr, i32 %val) { define i32 @test471(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test471: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB471_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stwcx. 6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB471_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") acquire @@ -7932,14 +7932,14 @@ define i32 @test471(i32* %ptr, i32 %val) { define i32 @test472(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test472: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB472_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB472_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i32* %ptr, i32 %val syncscope("singlethread") release @@ -7948,14 +7948,14 @@ define i32 @test472(i32* %ptr, i32 %val) { define i32 @test473(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test473: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB473_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB473_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7965,14 +7965,14 @@ define i32 @test473(i32* %ptr, i32 %val) { define i32 @test474(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test474: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB474_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stwcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB474_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -7982,13 +7982,13 @@ define i32 @test474(i32* %ptr, i32 %val) { define i64 @test475(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test475: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB475_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB475_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") monotonic @@ -7997,14 +7997,14 @@ define i64 @test475(i64* %ptr, i64 %val) { define i64 @test476(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test476: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB476_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: xor 6, 4, 3 ; PPC64LE-NEXT: stdcx. 
6, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB476_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") acquire @@ -8013,14 +8013,14 @@ define i64 @test476(i64* %ptr, i64 %val) { define i64 @test477(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test477: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB477_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB477_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: blr %ret = atomicrmw xor i64* %ptr, i64 %val syncscope("singlethread") release @@ -8029,14 +8029,14 @@ define i64 @test477(i64* %ptr, i64 %val) { define i64 @test478(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test478: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB478_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB478_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -8046,14 +8046,14 @@ define i64 @test478(i64* %ptr, i64 %val) { define i64 @test479(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test479: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB479_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: xor 6, 4, 5 ; PPC64LE-NEXT: stdcx. 6, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB479_1 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: mr 3, 5 ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: blr @@ -8063,13 +8063,13 @@ define i64 @test479(i64* %ptr, i64 %val) { define i8 @test480(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test480: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB480_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB480_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB480_1 ; PPC64LE-NEXT: .LBB480_3: @@ -8081,14 +8081,14 @@ define i8 @test480(i8* %ptr, i8 %val) { define i8 @test481(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test481: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB481_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB481_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB481_1 ; PPC64LE-NEXT: .LBB481_3: @@ -8100,14 +8100,14 @@ define i8 @test481(i8* %ptr, i8 %val) { define i8 @test482(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test482: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB482_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB482_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB482_1 ; PPC64LE-NEXT: .LBB482_3: @@ -8119,14 +8119,14 @@ define i8 @test482(i8* %ptr, i8 %val) { define i8 @test483(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test483: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB483_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB483_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB483_1 ; PPC64LE-NEXT: .LBB483_3: @@ -8139,14 +8139,14 @@ define i8 @test483(i8* %ptr, i8 %val) { define i8 @test484(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test484: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB484_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB484_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB484_1 ; PPC64LE-NEXT: .LBB484_3: @@ -8159,13 +8159,13 @@ define i8 @test484(i8* %ptr, i8 %val) { define i16 @test485(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test485: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB485_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB485_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB485_1 ; PPC64LE-NEXT: .LBB485_3: @@ -8177,14 +8177,14 @@ define i16 @test485(i16* %ptr, i16 %val) { define i16 @test486(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test486: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB486_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB486_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB486_1 ; PPC64LE-NEXT: .LBB486_3: @@ -8196,14 +8196,14 @@ define i16 @test486(i16* %ptr, i16 %val) { define i16 @test487(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test487: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB487_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB487_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB487_1 ; PPC64LE-NEXT: .LBB487_3: @@ -8215,14 +8215,14 @@ define i16 @test487(i16* %ptr, i16 %val) { define i16 @test488(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test488: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB488_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB488_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB488_1 ; PPC64LE-NEXT: .LBB488_3: @@ -8235,14 +8235,14 @@ define i16 @test488(i16* %ptr, i16 %val) { define i16 @test489(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test489: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB489_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: ble 0, .LBB489_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB489_1 ; PPC64LE-NEXT: .LBB489_3: @@ -8255,12 +8255,12 @@ define i16 @test489(i16* %ptr, i16 %val) { define i32 @test490(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test490: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB490_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB490_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB490_1 ; PPC64LE-NEXT: .LBB490_3: @@ -8272,13 +8272,13 @@ define i32 @test490(i32* %ptr, i32 %val) { define i32 @test491(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test491: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB491_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB491_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB491_1 ; PPC64LE-NEXT: .LBB491_3: @@ -8290,13 +8290,13 @@ define i32 @test491(i32* %ptr, i32 %val) { define i32 @test492(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test492: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB492_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB492_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB492_1 ; PPC64LE-NEXT: .LBB492_3: @@ -8308,13 +8308,13 @@ define i32 @test492(i32* %ptr, i32 %val) { define i32 @test493(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test493: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB493_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB493_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB493_1 ; PPC64LE-NEXT: .LBB493_3: @@ -8327,13 +8327,13 @@ define i32 @test493(i32* %ptr, i32 %val) { define i32 @test494(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test494: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB494_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB494_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB494_1 ; PPC64LE-NEXT: .LBB494_3: @@ -8346,12 +8346,12 @@ define i32 @test494(i32* %ptr, i32 %val) { define i64 @test495(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test495: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB495_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB495_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB495_1 ; PPC64LE-NEXT: .LBB495_3: @@ -8363,13 +8363,13 @@ define i64 @test495(i64* %ptr, i64 %val) { define i64 @test496(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test496: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB496_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: ble 0, .LBB496_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB496_1 ; PPC64LE-NEXT: .LBB496_3: @@ -8381,13 +8381,13 @@ define i64 @test496(i64* %ptr, i64 %val) { define i64 @test497(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test497: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB497_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB497_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB497_1 ; PPC64LE-NEXT: .LBB497_3: @@ -8399,13 +8399,13 @@ define i64 @test497(i64* %ptr, i64 %val) { define i64 @test498(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test498: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB498_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB498_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB498_1 ; PPC64LE-NEXT: .LBB498_3: @@ -8418,13 +8418,13 @@ define i64 @test498(i64* %ptr, i64 %val) { define i64 @test499(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test499: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB499_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: ble 0, .LBB499_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB499_1 ; PPC64LE-NEXT: .LBB499_3: @@ -8437,13 +8437,13 @@ define i64 @test499(i64* %ptr, i64 %val) { define i8 @test500(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test500: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB500_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB500_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB500_1 ; PPC64LE-NEXT: .LBB500_3: @@ -8455,14 +8455,14 @@ define i8 @test500(i8* %ptr, i8 %val) { define i8 @test501(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test501: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB501_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: extsb 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB501_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB501_1 ; PPC64LE-NEXT: .LBB501_3: @@ -8474,14 +8474,14 @@ define i8 @test501(i8* %ptr, i8 %val) { define i8 @test502(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test502: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB502_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB502_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB502_1 ; PPC64LE-NEXT: .LBB502_3: @@ -8493,14 +8493,14 @@ define i8 @test502(i8* %ptr, i8 %val) { define i8 @test503(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test503: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB503_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB503_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB503_1 ; PPC64LE-NEXT: .LBB503_3: @@ -8513,14 +8513,14 @@ define i8 @test503(i8* %ptr, i8 %val) { define i8 @test504(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test504: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB504_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: extsb 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB504_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB504_1 ; PPC64LE-NEXT: .LBB504_3: @@ -8533,13 +8533,13 @@ define i8 @test504(i8* %ptr, i8 %val) { define i16 @test505(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test505: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB505_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB505_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB505_1 ; PPC64LE-NEXT: .LBB505_3: @@ -8551,14 +8551,14 @@ define i16 @test505(i16* %ptr, i16 %val) { define i16 @test506(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test506: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB506_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: extsh 6, 3 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB506_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB506_1 ; PPC64LE-NEXT: .LBB506_3: @@ -8570,14 +8570,14 @@ define i16 @test506(i16* %ptr, i16 %val) { define i16 @test507(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test507: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB507_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB507_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB507_1 ; PPC64LE-NEXT: .LBB507_3: @@ -8589,14 +8589,14 @@ define i16 @test507(i16* %ptr, i16 %val) { define i16 @test508(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test508: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB508_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB508_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB508_1 ; PPC64LE-NEXT: .LBB508_3: @@ -8609,14 +8609,14 @@ define i16 @test508(i16* %ptr, i16 %val) { define i16 @test509(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test509: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB509_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: extsh 6, 5 ; PPC64LE-NEXT: cmpw 4, 6 ; PPC64LE-NEXT: bge 0, .LBB509_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB509_1 ; PPC64LE-NEXT: .LBB509_3: @@ -8629,12 +8629,12 @@ define i16 @test509(i16* %ptr, i16 %val) { define i32 @test510(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test510: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB510_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB510_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB510_1 ; PPC64LE-NEXT: .LBB510_3: @@ -8646,13 +8646,13 @@ define i32 @test510(i32* %ptr, i32 %val) { define i32 @test511(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test511: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB511_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmpw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB511_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB511_1 ; PPC64LE-NEXT: .LBB511_3: @@ -8664,13 +8664,13 @@ define i32 @test511(i32* %ptr, i32 %val) { define i32 @test512(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test512: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB512_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB512_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB512_1 ; PPC64LE-NEXT: .LBB512_3: @@ -8682,13 +8682,13 @@ define i32 @test512(i32* %ptr, i32 %val) { define i32 @test513(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test513: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB513_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB513_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB513_1 ; PPC64LE-NEXT: .LBB513_3: @@ -8701,13 +8701,13 @@ define i32 @test513(i32* %ptr, i32 %val) { define i32 @test514(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test514: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB514_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmpw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB514_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB514_1 ; PPC64LE-NEXT: .LBB514_3: @@ -8720,12 +8720,12 @@ define i32 @test514(i32* %ptr, i32 %val) { define i64 @test515(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test515: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB515_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB515_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB515_1 ; PPC64LE-NEXT: .LBB515_3: @@ -8737,13 +8737,13 @@ define i64 @test515(i64* %ptr, i64 %val) { define i64 @test516(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test516: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB516_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpd 4, 3 ; PPC64LE-NEXT: bge 0, .LBB516_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB516_1 ; PPC64LE-NEXT: .LBB516_3: @@ -8755,13 +8755,13 @@ define i64 @test516(i64* %ptr, i64 %val) { define i64 @test517(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test517: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB517_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB517_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB517_1 ; PPC64LE-NEXT: .LBB517_3: @@ -8773,13 +8773,13 @@ define i64 @test517(i64* %ptr, i64 %val) { define i64 @test518(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test518: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB518_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB518_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB518_1 ; PPC64LE-NEXT: .LBB518_3: @@ -8792,13 +8792,13 @@ define i64 @test518(i64* %ptr, i64 %val) { define i64 @test519(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test519: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB519_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpd 4, 5 ; PPC64LE-NEXT: bge 0, .LBB519_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB519_1 ; PPC64LE-NEXT: .LBB519_3: @@ -8811,12 +8811,12 @@ define i64 @test519(i64* %ptr, i64 %val) { define i8 @test520(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test520: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB520_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB520_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB520_1 ; PPC64LE-NEXT: .LBB520_3: @@ -8828,13 +8828,13 @@ define i8 @test520(i8* %ptr, i8 %val) { define i8 @test521(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test521: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB521_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB521_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB521_1 ; PPC64LE-NEXT: .LBB521_3: @@ -8846,13 +8846,13 @@ define i8 @test521(i8* %ptr, i8 %val) { define i8 @test522(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test522: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB522_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB522_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB522_1 ; PPC64LE-NEXT: .LBB522_3: @@ -8864,13 +8864,13 @@ define i8 @test522(i8* %ptr, i8 %val) { define i8 @test523(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test523: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB523_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB523_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB523_1 ; PPC64LE-NEXT: .LBB523_3: @@ -8883,13 +8883,13 @@ define i8 @test523(i8* %ptr, i8 %val) { define i8 @test524(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test524: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB524_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB524_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB524_1 ; PPC64LE-NEXT: .LBB524_3: @@ -8902,12 +8902,12 @@ define i8 @test524(i8* %ptr, i8 %val) { define i16 @test525(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test525: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB525_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB525_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB525_1 ; PPC64LE-NEXT: .LBB525_3: @@ -8919,13 +8919,13 @@ define i16 @test525(i16* %ptr, i16 %val) { define i16 @test526(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test526: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB526_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB526_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB526_1 ; PPC64LE-NEXT: .LBB526_3: @@ -8937,13 +8937,13 @@ define i16 @test526(i16* %ptr, i16 %val) { define i16 @test527(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test527: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB527_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB527_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB527_1 ; PPC64LE-NEXT: .LBB527_3: @@ -8955,13 +8955,13 @@ define i16 @test527(i16* %ptr, i16 %val) { define i16 @test528(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test528: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB528_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB528_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB528_1 ; PPC64LE-NEXT: .LBB528_3: @@ -8974,13 +8974,13 @@ define i16 @test528(i16* %ptr, i16 %val) { define i16 @test529(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test529: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB529_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB529_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB529_1 ; PPC64LE-NEXT: .LBB529_3: @@ -8993,12 +8993,12 @@ define i16 @test529(i16* %ptr, i16 %val) { define i32 @test530(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test530: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB530_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB530_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB530_1 ; PPC64LE-NEXT: .LBB530_3: @@ -9010,13 +9010,13 @@ define i32 @test530(i32* %ptr, i32 %val) { define i32 @test531(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test531: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB531_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: ble 0, .LBB531_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB531_1 ; PPC64LE-NEXT: .LBB531_3: @@ -9028,13 +9028,13 @@ define i32 @test531(i32* %ptr, i32 %val) { define i32 @test532(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test532: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB532_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB532_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB532_1 ; PPC64LE-NEXT: .LBB532_3: @@ -9046,13 +9046,13 @@ define i32 @test532(i32* %ptr, i32 %val) { define i32 @test533(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test533: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB533_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB533_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB533_1 ; PPC64LE-NEXT: .LBB533_3: @@ -9065,13 +9065,13 @@ define i32 @test533(i32* %ptr, i32 %val) { define i32 @test534(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test534: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB534_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: ble 0, .LBB534_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB534_1 ; PPC64LE-NEXT: .LBB534_3: @@ -9084,12 +9084,12 @@ define i32 @test534(i32* %ptr, i32 %val) { define i64 @test535(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test535: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB535_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB535_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB535_1 ; PPC64LE-NEXT: .LBB535_3: @@ -9101,13 +9101,13 @@ define i64 @test535(i64* %ptr, i64 %val) { define i64 @test536(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test536: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB536_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: ble 0, .LBB536_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB536_1 ; PPC64LE-NEXT: .LBB536_3: @@ -9119,13 +9119,13 @@ define i64 @test536(i64* %ptr, i64 %val) { define i64 @test537(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test537: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB537_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB537_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB537_1 ; PPC64LE-NEXT: .LBB537_3: @@ -9137,13 +9137,13 @@ define i64 @test537(i64* %ptr, i64 %val) { define i64 @test538(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test538: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB538_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB538_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB538_1 ; PPC64LE-NEXT: .LBB538_3: @@ -9156,13 +9156,13 @@ define i64 @test538(i64* %ptr, i64 %val) { define i64 @test539(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test539: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB539_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: ble 0, .LBB539_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB539_1 ; PPC64LE-NEXT: .LBB539_3: @@ -9175,12 +9175,12 @@ define i64 @test539(i64* %ptr, i64 %val) { define i8 @test540(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test540: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB540_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB540_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB540_1 ; PPC64LE-NEXT: .LBB540_3: @@ -9192,13 +9192,13 @@ define i8 @test540(i8* %ptr, i8 %val) { define i8 @test541(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test541: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB541_1: ; PPC64LE-NEXT: lbarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB541_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB541_1 ; PPC64LE-NEXT: .LBB541_3: @@ -9210,13 +9210,13 @@ define i8 @test541(i8* %ptr, i8 %val) { define i8 @test542(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test542: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB542_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB542_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB542_1 ; PPC64LE-NEXT: .LBB542_3: @@ -9228,13 +9228,13 @@ define i8 @test542(i8* %ptr, i8 %val) { define i8 @test543(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test543: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB543_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB543_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB543_1 ; PPC64LE-NEXT: .LBB543_3: @@ -9247,13 +9247,13 @@ define i8 @test543(i8* %ptr, i8 %val) { define i8 @test544(i8* %ptr, i8 %val) { ; PPC64LE-LABEL: test544: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB544_1: ; PPC64LE-NEXT: lbarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB544_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stbcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB544_1 ; PPC64LE-NEXT: .LBB544_3: @@ -9266,12 +9266,12 @@ define i8 @test544(i8* %ptr, i8 %val) { define i16 @test545(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test545: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB545_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB545_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB545_1 ; PPC64LE-NEXT: .LBB545_3: @@ -9283,13 +9283,13 @@ define i16 @test545(i16* %ptr, i16 %val) { define i16 @test546(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test546: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB546_1: ; PPC64LE-NEXT: lharx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB546_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 
4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB546_1 ; PPC64LE-NEXT: .LBB546_3: @@ -9301,13 +9301,13 @@ define i16 @test546(i16* %ptr, i16 %val) { define i16 @test547(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test547: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB547_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB547_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB547_1 ; PPC64LE-NEXT: .LBB547_3: @@ -9319,13 +9319,13 @@ define i16 @test547(i16* %ptr, i16 %val) { define i16 @test548(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test548: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB548_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB548_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB548_1 ; PPC64LE-NEXT: .LBB548_3: @@ -9338,13 +9338,13 @@ define i16 @test548(i16* %ptr, i16 %val) { define i16 @test549(i16* %ptr, i16 %val) { ; PPC64LE-LABEL: test549: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB549_1: ; PPC64LE-NEXT: lharx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB549_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: sthcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB549_1 ; PPC64LE-NEXT: .LBB549_3: @@ -9357,12 +9357,12 @@ define i16 @test549(i16* %ptr, i16 %val) { define i32 @test550(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test550: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB550_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB550_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB550_1 ; PPC64LE-NEXT: .LBB550_3: @@ -9374,13 +9374,13 @@ define i32 @test550(i32* %ptr, i32 %val) { define i32 @test551(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test551: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB551_1: ; PPC64LE-NEXT: lwarx 3, 0, 5 ; PPC64LE-NEXT: cmplw 4, 3 ; PPC64LE-NEXT: bge 0, .LBB551_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB551_1 ; PPC64LE-NEXT: .LBB551_3: @@ -9392,13 +9392,13 @@ define i32 @test551(i32* %ptr, i32 %val) { define i32 @test552(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test552: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB552_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB552_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB552_1 ; PPC64LE-NEXT: .LBB552_3: @@ -9410,13 +9410,13 @@ define i32 @test552(i32* %ptr, i32 %val) { define i32 @test553(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test553: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB553_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB553_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 
4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB553_1 ; PPC64LE-NEXT: .LBB553_3: @@ -9429,13 +9429,13 @@ define i32 @test553(i32* %ptr, i32 %val) { define i32 @test554(i32* %ptr, i32 %val) { ; PPC64LE-LABEL: test554: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB554_1: ; PPC64LE-NEXT: lwarx 5, 0, 3 ; PPC64LE-NEXT: cmplw 4, 5 ; PPC64LE-NEXT: bge 0, .LBB554_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stwcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB554_1 ; PPC64LE-NEXT: .LBB554_3: @@ -9448,12 +9448,12 @@ define i32 @test554(i32* %ptr, i32 %val) { define i64 @test555(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test555: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: .LBB555_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB555_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB555_1 ; PPC64LE-NEXT: .LBB555_3: @@ -9465,13 +9465,13 @@ define i64 @test555(i64* %ptr, i64 %val) { define i64 @test556(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test556: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: mr 5, 3 ; PPC64LE-NEXT: .LBB556_1: ; PPC64LE-NEXT: ldarx 3, 0, 5 ; PPC64LE-NEXT: cmpld 4, 3 ; PPC64LE-NEXT: bge 0, .LBB556_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 5 ; PPC64LE-NEXT: bne 0, .LBB556_1 ; PPC64LE-NEXT: .LBB556_3: @@ -9483,13 +9483,13 @@ define i64 @test556(i64* %ptr, i64 %val) { define i64 @test557(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test557: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB557_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB557_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB557_1 ; PPC64LE-NEXT: .LBB557_3: @@ -9501,13 +9501,13 @@ define i64 @test557(i64* %ptr, i64 %val) { define i64 @test558(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test558: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwsync ; PPC64LE-NEXT: .LBB558_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB558_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB558_1 ; PPC64LE-NEXT: .LBB558_3: @@ -9520,13 +9520,13 @@ define i64 @test558(i64* %ptr, i64 %val) { define i64 @test559(i64* %ptr, i64 %val) { ; PPC64LE-LABEL: test559: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: sync ; PPC64LE-NEXT: .LBB559_1: ; PPC64LE-NEXT: ldarx 5, 0, 3 ; PPC64LE-NEXT: cmpld 4, 5 ; PPC64LE-NEXT: bge 0, .LBB559_3 -; PPC64LE-NEXT: # BB#2: +; PPC64LE-NEXT: # %bb.2: ; PPC64LE-NEXT: stdcx. 4, 0, 3 ; PPC64LE-NEXT: bne 0, .LBB559_1 ; PPC64LE-NEXT: .LBB559_3: @@ -9540,7 +9540,7 @@ define i64 @test559(i64* %ptr, i64 %val) { ; The second load should never be scheduled before isync. define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) { ; PPC64LE-LABEL: test_ordering0: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 4, 0(3) ; PPC64LE-NEXT: cmpd 7, 4, 4 ; PPC64LE-NEXT: bne- 7, .+4 @@ -9557,7 +9557,7 @@ define i32 @test_ordering0(i32* %ptr1, i32* %ptr2) { ; The second store should never be scheduled before isync. 
define i32 @test_ordering1(i32* %ptr1, i32 %val1, i32* %ptr2) { ; PPC64LE-LABEL: test_ordering1: -; PPC64LE: # BB#0: +; PPC64LE: # %bb.0: ; PPC64LE-NEXT: lwz 3, 0(3) ; PPC64LE-NEXT: cmpd 7, 3, 3 ; PPC64LE-NEXT: bne- 7, .+4 diff --git a/test/CodeGen/PowerPC/branch_coalesce.ll b/test/CodeGen/PowerPC/branch_coalesce.ll index 007eef27b2d..a57dec111bc 100644 --- a/test/CodeGen/PowerPC/branch_coalesce.ll +++ b/test/CodeGen/PowerPC/branch_coalesce.ll @@ -23,10 +23,10 @@ define double @testBranchCoal(double %a, double %b, double %c, i32 %x) { ; CHECK: blr ; CHECK-NOCOALESCE-LABEL: testBranchCoal: -; CHECK-NOCOALESCE: # BB#0: # %entry +; CHECK-NOCOALESCE: # %bb.0: # %entry ; CHECK-NOCOALESCE-NEXT: cmplwi 0, 6, 0 ; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_5 -; CHECK-NOCOALESCE-NEXT: # BB#1: # %entry +; CHECK-NOCOALESCE-NEXT: # %bb.1: # %entry ; CHECK-NOCOALESCE-NEXT: bne 0, .LBB0_6 ; CHECK-NOCOALESCE-NEXT: .LBB0_2: # %entry ; CHECK-NOCOALESCE-NEXT: beq 0, .LBB0_4 diff --git a/test/CodeGen/PowerPC/fabs.ll b/test/CodeGen/PowerPC/fabs.ll index c8cbd00b4dc..369803af979 100644 --- a/test/CodeGen/PowerPC/fabs.ll +++ b/test/CodeGen/PowerPC/fabs.ll @@ -2,7 +2,7 @@ define double @fabs(double %f) { ; CHECK-LABEL: fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: fabs f1, f1 ; CHECK-NEXT: blr ; @@ -12,7 +12,7 @@ define double @fabs(double %f) { define float @bitcast_fabs(float %x) { ; CHECK-LABEL: bitcast_fabs: -; CHECK: ; BB#0: +; CHECK: ; %bb.0: ; CHECK-NEXT: stfs f1, -8(r1) ; CHECK-NEXT: nop ; CHECK-NEXT: nop diff --git a/test/CodeGen/PowerPC/fma-aggr-FMF.ll b/test/CodeGen/PowerPC/fma-aggr-FMF.ll index 8e97115bd1f..e861c9df37a 100644 --- a/test/CodeGen/PowerPC/fma-aggr-FMF.ll +++ b/test/CodeGen/PowerPC/fma-aggr-FMF.ll @@ -3,7 +3,7 @@ define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) { ; CHECK-LABEL: can_fma_with_fewer_uses: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xsmulsp 0, 1, 2 ; CHECK-NEXT: fmr 1, 0 ; CHECK-NEXT: xsmaddasp 1, 3, 4 @@ -21,7 +21,7 @@ define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4 ; around beside the fma. 
define float @no_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) { ; CHECK-LABEL: no_fma_with_fewer_uses: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xsmulsp 0, 3, 4 ; CHECK-NEXT: xsmulsp 13, 1, 2 ; CHECK-NEXT: xsmaddasp 0, 1, 2 diff --git a/test/CodeGen/PowerPC/fp64-to-int16.ll b/test/CodeGen/PowerPC/fp64-to-int16.ll index 0c5274d9426..360a9866518 100644 --- a/test/CodeGen/PowerPC/fp64-to-int16.ll +++ b/test/CodeGen/PowerPC/fp64-to-int16.ll @@ -4,7 +4,7 @@ target triple = "powerpc64le--linux-gnu" define i1 @Test(double %a) { ; CHECK-LABEL: Test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xscvdpsxws 1, 1 ; CHECK-NEXT: mfvsrwz 3, 1 ; CHECK-NEXT: xori 3, 3, 65534 diff --git a/test/CodeGen/PowerPC/hello-reloc.s b/test/CodeGen/PowerPC/hello-reloc.s index bbf1e7cacbd..66bc9a84f5c 100644 --- a/test/CodeGen/PowerPC/hello-reloc.s +++ b/test/CodeGen/PowerPC/hello-reloc.s @@ -11,7 +11,7 @@ .globl _main .align 4 _main: ; @main -; BB#0: ; %entry +; %bb.0: ; %entry mflr r0 stw r31, -4(r1) stw r0, 8(r1) diff --git a/test/CodeGen/PowerPC/licm-remat.ll b/test/CodeGen/PowerPC/licm-remat.ll index cbd1af62b84..393c56bcb86 100644 --- a/test/CodeGen/PowerPC/licm-remat.ll +++ b/test/CodeGen/PowerPC/licm-remat.ll @@ -18,7 +18,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture r define linkonce_odr void @ZN6snappyDecompressor_(%"class.snappy::SnappyDecompressor"* %this, %"class.snappy::SnappyIOVecWriter"* %writer) { ; CHECK-LABEL: ZN6snappyDecompressor_: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: addis 3, 2, _ZN6snappy8internalL8wordmaskE@toc@ha ; CHECK-DAG: addi 25, 3, _ZN6snappy8internalL8wordmaskE@toc@l ; CHECK-DAG: addis 4, 2, _ZN6snappy8internalL10char_tableE@toc@ha diff --git a/test/CodeGen/PowerPC/licm-tocReg.ll b/test/CodeGen/PowerPC/licm-tocReg.ll index 824d554991a..efbec9091a5 100644 --- a/test/CodeGen/PowerPC/licm-tocReg.ll +++ b/test/CodeGen/PowerPC/licm-tocReg.ll @@ -64,7 +64,7 @@ define signext i32 @test(i32 (i32)* nocapture %FP) local_unnamed_addr #0 { ; CHECK-LABEL: test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis 6, 2, .LC0@toc@ha ; CHECK-NEXT: addis 4, 2, .LC1@toc@ha ; CHECK-NEXT: ld 5, .LC1@toc@l(4) diff --git a/test/CodeGen/PowerPC/logic-ops-on-compares.ll b/test/CodeGen/PowerPC/logic-ops-on-compares.ll index e448afd03ea..b1b26f0ab76 100644 --- a/test/CodeGen/PowerPC/logic-ops-on-compares.ll +++ b/test/CodeGen/PowerPC/logic-ops-on-compares.ll @@ -43,11 +43,11 @@ return: ; preds = %if.end, %if.then define void @neg_truncate_i32_eq(i32 *%ptr) { ; CHECK-LABEL: neg_truncate_i32_eq: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz r3, 0(r3) ; CHECK-NEXT: rldicl. r3, r3, 0, 63 ; CHECK-NEXT: bclr 12, eq, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i32, i32* %ptr, align 4 %rem17127 = and i32 %0, 1 @@ -101,11 +101,11 @@ return: ; preds = %if.end, %if.then define void @neg_truncate_i64_eq(i64 *%ptr) { ; CHECK-LABEL: neg_truncate_i64_eq: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ld r3, 0(r3) ; CHECK-NEXT: rldicl. 
r3, r3, 0, 63 ; CHECK-NEXT: bclr 12, eq, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i64, i64* %ptr, align 4 %rem17127 = and i64 %0, 1 @@ -161,11 +161,11 @@ return: ; preds = %if.end, %if.then define void @neg_truncate_i64_ne(i64 *%ptr) { ; CHECK-LABEL: neg_truncate_i64_ne: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ld r3, 0(r3) ; CHECK-NEXT: andi. r3, r3, 1 ; CHECK-NEXT: bclr 12, gt, 0 -; CHECK-NEXT: # BB#1: # %if.end29.thread136 +; CHECK-NEXT: # %bb.1: # %if.end29.thread136 entry: %0 = load i64, i64* %ptr, align 4 %rem17127 = and i64 %0, 1 diff --git a/test/CodeGen/PowerPC/machine-combiner.ll b/test/CodeGen/PowerPC/machine-combiner.ll index e026017710e..c7337e3637e 100644 --- a/test/CodeGen/PowerPC/machine-combiner.ll +++ b/test/CodeGen/PowerPC/machine-combiner.ll @@ -8,7 +8,7 @@ target triple = "powerpc64-unknown-linux-gnu" define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -22,7 +22,7 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) { define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -36,7 +36,7 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) { define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds3: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -50,7 +50,7 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) { define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) { ; CHECK-LABEL: reassociate_adds4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG1:[0-9]+]], 3, 4 ; CHECK: fadds 1, [[REG0]], [[REG1]] @@ -67,7 +67,7 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) { define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) { ; CHECK-LABEL: reassociate_adds5: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK: fadds [[REG12:[0-9]+]], 5, 6 ; CHECK: fadds [[REG0:[0-9]+]], 1, 2 ; CHECK: fadds [[REG11:[0-9]+]], 3, 4 @@ -91,7 +91,7 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -108,7 +108,7 @@ define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, < define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -125,7 +125,7 @@ define <4 x float> 
@vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, < define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds3: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] @@ -142,7 +142,7 @@ define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, < define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) { ; CHECK-LABEL: vector_reassociate_adds4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-QPX: qvfadds [[REG0:[0-9]+]], 1, 2 ; CHECK-QPX: qvfadds [[REG1:[0-9]+]], 3, 4 ; CHECK-QPX: qvfadds 1, [[REG0]], [[REG1]] diff --git a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll index e91b9acaee0..3bfc0de1b87 100644 --- a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll +++ b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll @@ -17,7 +17,7 @@ declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_add ; Check 4 bytes - requires 1 load for each param. define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest02: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: xor 3, 3, 4 @@ -34,12 +34,12 @@ define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) { ; Check 16 bytes - requires 2 loads for each param (or use vectors?). define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest01: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ld 5, 0(3) ; CHECK-NEXT: ld 6, 0(4) ; CHECK-NEXT: cmpld 5, 6 ; CHECK-NEXT: bne 0, .LBB1_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: ld 3, 8(3) ; CHECK-NEXT: ld 4, 8(4) ; CHECK-NEXT: cmpld 3, 4 @@ -59,17 +59,17 @@ define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) { ; Check 7 bytes - requires 3 loads for each param. 
define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) { ; CHECK-LABEL: zeroEqualityTest03: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwz 5, 0(3) ; CHECK-NEXT: lwz 6, 0(4) ; CHECK-NEXT: cmplw 5, 6 ; CHECK-NEXT: bne 0, .LBB2_3 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: lhz 5, 4(3) ; CHECK-NEXT: lhz 6, 4(4) ; CHECK-NEXT: cmplw 5, 6 ; CHECK-NEXT: bne 0, .LBB2_3 -; CHECK-NEXT: # BB#2: # %loadbb2 +; CHECK-NEXT: # %bb.2: # %loadbb2 ; CHECK-NEXT: lbz 3, 6(3) ; CHECK-NEXT: lbz 4, 6(4) ; CHECK-NEXT: cmplw 3, 4 @@ -89,7 +89,7 @@ define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) { ; Validate with > 0 define signext i32 @zeroEqualityTest04() { ; CHECK-LABEL: zeroEqualityTest04: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest02.buffer1@toc@ha ; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest02.buffer2@toc@ha ; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest02.buffer1@toc@l @@ -98,7 +98,7 @@ define signext i32 @zeroEqualityTest04() { ; CHECK-NEXT: ldbrx 4, 0, 5 ; CHECK-NEXT: cmpld 3, 4 ; CHECK-NEXT: bne 0, .LBB3_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 8 ; CHECK-NEXT: ldbrx 3, 6, 4 ; CHECK-NEXT: ldbrx 4, 5, 4 @@ -125,7 +125,7 @@ define signext i32 @zeroEqualityTest04() { ; Validate with < 0 define signext i32 @zeroEqualityTest05() { ; CHECK-LABEL: zeroEqualityTest05: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest03.buffer1@toc@ha ; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest03.buffer2@toc@ha ; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest03.buffer1@toc@l @@ -134,7 +134,7 @@ define signext i32 @zeroEqualityTest05() { ; CHECK-NEXT: ldbrx 4, 0, 5 ; CHECK-NEXT: cmpld 3, 4 ; CHECK-NEXT: bne 0, .LBB4_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 8 ; CHECK-NEXT: ldbrx 3, 6, 4 ; CHECK-NEXT: ldbrx 4, 5, 4 @@ -160,7 +160,7 @@ define signext i32 @zeroEqualityTest05() { ; Validate with memcmp()?: define signext i32 @equalityFoldTwoConstants() { ; CHECK-LABEL: equalityFoldTwoConstants: -; CHECK: # BB#0: # %endblock +; CHECK: # %bb.0: # %endblock ; CHECK-NEXT: li 3, 1 ; CHECK-NEXT: blr %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16) @@ -171,13 +171,13 @@ define signext i32 @equalityFoldTwoConstants() { define signext i32 @equalityFoldOneConstant(i8* %X) { ; CHECK-LABEL: equalityFoldOneConstant: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, 1 ; CHECK-NEXT: ld 4, 0(3) ; CHECK-NEXT: sldi 5, 5, 32 ; CHECK-NEXT: cmpld 4, 5 ; CHECK-NEXT: bne 0, .LBB6_2 -; CHECK-NEXT: # BB#1: # %loadbb1 +; CHECK-NEXT: # %bb.1: # %loadbb1 ; CHECK-NEXT: li 4, 3 ; CHECK-NEXT: ld 3, 8(3) ; CHECK-NEXT: sldi 4, 4, 32 @@ -199,7 +199,7 @@ define signext i32 @equalityFoldOneConstant(i8* %X) { define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) { ; CHECK-LABEL: length2_eq_nobuiltin_attr: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mflr 0 ; CHECK-NEXT: std 0, 16(1) ; CHECK-NEXT: stdu 1, -32(1) diff --git a/test/CodeGen/PowerPC/memcmp.ll b/test/CodeGen/PowerPC/memcmp.ll index 392be4d712c..4aa5b400dd7 100644 --- a/test/CodeGen/PowerPC/memcmp.ll +++ b/test/CodeGen/PowerPC/memcmp.ll @@ -3,7 +3,7 @@ define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp8: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: ldbrx 3, 0, 3 ; CHECK-NEXT: ldbrx 4, 0, 4 ; 
CHECK-NEXT: subfc 5, 3, 4 @@ -23,7 +23,7 @@ define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture rea define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp4: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lwbrx 3, 0, 3 ; CHECK-NEXT: lwbrx 4, 0, 4 ; CHECK-NEXT: sub 5, 4, 3 @@ -41,7 +41,7 @@ define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture rea define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp2: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lhbrx 3, 0, 3 ; CHECK-NEXT: lhbrx 4, 0, 4 ; CHECK-NEXT: subf 3, 4, 3 @@ -55,7 +55,7 @@ define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture rea define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) { ; CHECK-LABEL: memcmp1: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lbz 3, 0(3) ; CHECK-NEXT: lbz 4, 0(4) ; CHECK-NEXT: subf 3, 4, 3 diff --git a/test/CodeGen/PowerPC/negate-i1.ll b/test/CodeGen/PowerPC/negate-i1.ll index c6a7867fe9d..a56048d67a8 100644 --- a/test/CodeGen/PowerPC/negate-i1.ll +++ b/test/CodeGen/PowerPC/negate-i1.ll @@ -4,7 +4,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { ; CHECK-LABEL: select_i32_neg1_or_0: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: clrldi 3, 3, 63 ; CHECK-NEXT: neg 3, 3 ; CHECK-NEXT: blr @@ -15,7 +15,7 @@ define i32 @select_i32_neg1_or_0(i1 %a) { define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) { ; CHECK-LABEL: select_i32_neg1_or_0_zeroext: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: neg 3, 3 ; CHECK-NEXT: blr ; diff --git a/test/CodeGen/PowerPC/ppc32-nest.ll b/test/CodeGen/PowerPC/ppc32-nest.ll index 221e8be2951..b933edcf616 100644 --- a/test/CodeGen/PowerPC/ppc32-nest.ll +++ b/test/CodeGen/PowerPC/ppc32-nest.ll @@ -7,7 +7,7 @@ target triple = "powerpc-unknown-linux-gnu" define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mr 3, 11 ; CHECK-NEXT: blr diff --git a/test/CodeGen/PowerPC/ppc64-nest.ll b/test/CodeGen/PowerPC/ppc64-nest.ll index 14872632e81..cd2366cfa45 100644 --- a/test/CodeGen/PowerPC/ppc64-nest.ll +++ b/test/CodeGen/PowerPC/ppc64-nest.ll @@ -7,7 +7,7 @@ target triple = "powerpc64-unknown-linux-gnu" define i8* @nest_receiver(i8* nest %arg) nounwind { ; CHECK-LABEL: nest_receiver: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: mr 3, 11 ; CHECK-NEXT: blr diff --git a/test/CodeGen/PowerPC/pr32140.ll b/test/CodeGen/PowerPC/pr32140.ll index 827a90404e4..3feb9bd9c9e 100644 --- a/test/CodeGen/PowerPC/pr32140.ll +++ b/test/CodeGen/PowerPC/pr32140.ll @@ -9,7 +9,7 @@ define void @bswapStorei64Toi32() { ; CHECK-LABEL: bswapStorei64Toi32: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lwa 3, 0(3) ; CHECK-NEXT: rldicl 3, 3, 32, 32 ; CHECK-NEXT: stwbrx 3, 0, 4 @@ -25,7 +25,7 @@ entry: define void @bswapStorei32Toi16() { ; CHECK-LABEL: bswapStorei32Toi16: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lha 3, 0(3) ; CHECK-NEXT: srwi 3, 3, 16 ; CHECK-NEXT: sthbrx 3, 0, 4 @@ -41,7 +41,7 @@ entry: define void @bswapStorei64Toi16() { ; CHECK-LABEL: bswapStorei64Toi16: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lha 3, 0(3) ; CHECK-NEXT: rldicl 3, 3, 16, 48 ; CHECK-NEXT: sthbrx 3, 0, 4 diff --git a/test/CodeGen/PowerPC/pr33093.ll b/test/CodeGen/PowerPC/pr33093.ll index fc28bcfd0ca..af0350e17fd 100644 --- 
a/test/CodeGen/PowerPC/pr33093.ll +++ b/test/CodeGen/PowerPC/pr33093.ll @@ -4,7 +4,7 @@ define zeroext i32 @ReverseBits(i32 zeroext %n) { ; CHECK-LABEL: ReverseBits: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: slwi 6, 3, 1 @@ -68,7 +68,7 @@ entry: define i64 @ReverseBits64(i64 %n) { ; CHECK-LABEL: ReverseBits64: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: lis 6, -13108 diff --git a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll index 6be31eaea74..46b23ff04f2 100644 --- a/test/CodeGen/PowerPC/select-addrRegRegOnly.ll +++ b/test/CodeGen/PowerPC/select-addrRegRegOnly.ll @@ -4,7 +4,7 @@ ; Function Attrs: norecurse nounwind readonly define float @testSingleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 { ; CHECK-LABEL: testSingleAccess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi 3, 3, 8 ; CHECK-NEXT: lfiwax 0, 0, 3 ; CHECK-NEXT: xscvsxdsp 1, 0 @@ -19,7 +19,7 @@ entry: ; Function Attrs: norecurse nounwind readonly define float @testMultipleAccess(i32* nocapture readonly %arr) local_unnamed_addr #0 { ; CHECK-LABEL: testMultipleAccess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 4, 8(3) ; CHECK-NEXT: lwz 12, 12(3) ; CHECK-NEXT: add 3, 12, 4 diff --git a/test/CodeGen/PowerPC/select_const.ll b/test/CodeGen/PowerPC/select_const.ll index fd864805abd..178d9187e3b 100644 --- a/test/CodeGen/PowerPC/select_const.ll +++ b/test/CodeGen/PowerPC/select_const.ll @@ -9,7 +9,7 @@ define i32 @select_0_or_1(i1 %cond) { ; ALL-LABEL: select_0_or_1: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: not 3, 3 ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr @@ -19,7 +19,7 @@ define i32 @select_0_or_1(i1 %cond) { define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_0_or_1_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: xori 3, 3, 1 ; ALL-NEXT: blr %sel = select i1 %cond, i32 0, i32 1 @@ -28,7 +28,7 @@ define i32 @select_0_or_1_zeroext(i1 zeroext %cond) { define i32 @select_0_or_1_signext(i1 signext %cond) { ; ALL-LABEL: select_0_or_1_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: not 3, 3 ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr @@ -40,7 +40,7 @@ define i32 @select_0_or_1_signext(i1 signext %cond) { define i32 @select_1_or_0(i1 %cond) { ; ALL-LABEL: select_1_or_0: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 @@ -49,7 +49,7 @@ define i32 @select_1_or_0(i1 %cond) { define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_1_or_0_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 ret i32 %sel @@ -57,7 +57,7 @@ define i32 @select_1_or_0_zeroext(i1 zeroext %cond) { define i32 @select_1_or_0_signext(i1 signext %cond) { ; ALL-LABEL: select_1_or_0_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: blr %sel = select i1 %cond, i32 1, i32 0 @@ -68,7 +68,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) { define i32 @select_0_or_neg1(i1 %cond) { ; ISEL-LABEL: select_0_or_neg1: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 
3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -77,7 +77,7 @@ define i32 @select_0_or_neg1(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -93,7 +93,7 @@ define i32 @select_0_or_neg1(i1 %cond) { define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_0_or_neg1_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -102,7 +102,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -118,7 +118,7 @@ define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) { define i32 @select_0_or_neg1_signext(i1 signext %cond) { ; ISEL-LABEL: select_0_or_neg1_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -127,7 +127,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_0_or_neg1_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 @@ -145,7 +145,7 @@ define i32 @select_0_or_neg1_signext(i1 signext %cond) { define i32 @select_neg1_or_0(i1 %cond) { ; ISEL-LABEL: select_neg1_or_0: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -154,13 +154,13 @@ define i32 @select_neg1_or_0(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -169,7 +169,7 @@ define i32 @select_neg1_or_0(i1 %cond) { define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_neg1_or_0_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -178,13 +178,13 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -193,7 +193,7 @@ define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) { define i32 @select_neg1_or_0_signext(i1 signext %cond) { ; ISEL-LABEL: select_neg1_or_0_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -202,13 +202,13 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_neg1_or_0_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: ori 3, 3, 65535 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i32 -1, i32 0 @@ -219,7 +219,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) { define i32 @select_Cplus1_C(i1 %cond) { ; ALL-LABEL: select_Cplus1_C: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: addi 3, 3, 41 ; ALL-NEXT: blr @@ -229,7 +229,7 @@ define i32 @select_Cplus1_C(i1 %cond) { define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_Cplus1_C_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: addi 3, 3, 41 ; ALL-NEXT: blr %sel = select i1 %cond, i32 42, i32 41 @@ -238,7 +238,7 @@ define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) { define i32 @select_Cplus1_C_signext(i1 signext %cond) { ; ALL-LABEL: select_Cplus1_C_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: subfic 3, 3, 41 ; ALL-NEXT: blr %sel = select i1 %cond, i32 42, i32 41 @@ -249,7 +249,7 @@ define i32 @select_Cplus1_C_signext(i1 signext %cond) { define i32 @select_C_Cplus1(i1 %cond) { ; ALL-LABEL: select_C_Cplus1: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: subfic 3, 3, 42 ; ALL-NEXT: blr @@ -259,7 +259,7 @@ define i32 @select_C_Cplus1(i1 %cond) { define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { ; ALL-LABEL: select_C_Cplus1_zeroext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: subfic 3, 3, 42 ; ALL-NEXT: blr %sel = select i1 %cond, i32 41, i32 42 @@ -268,7 +268,7 @@ define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) { define i32 @select_C_Cplus1_signext(i1 signext %cond) { ; ALL-LABEL: select_C_Cplus1_signext: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: addi 3, 3, 42 ; ALL-NEXT: blr %sel = select i1 %cond, i32 41, i32 42 @@ -280,7 +280,7 @@ define i32 @select_C_Cplus1_signext(i1 signext %cond) { define i32 @select_C1_C2(i1 %cond) { ; ISEL-LABEL: select_C1_C2: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -288,7 +288,7 @@ define i32 @select_C1_C2(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -303,7 +303,7 @@ define i32 @select_C1_C2(i1 %cond) { define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; ISEL-LABEL: select_C1_C2_zeroext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -311,7 +311,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2_zeroext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -326,7 +326,7 @@ define i32 @select_C1_C2_zeroext(i1 zeroext %cond) { define i32 @select_C1_C2_signext(i1 signext %cond) { ; ISEL-LABEL: select_C1_C2_signext: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 421 ; ISEL-NEXT: li 3, 42 @@ -334,7 +334,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: select_C1_C2_signext: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 
3, 3, 1 ; NO_ISEL-NEXT: li 4, 421 ; NO_ISEL-NEXT: li 3, 42 @@ -351,7 +351,7 @@ define i32 @select_C1_C2_signext(i1 signext %cond) { define i8 @sel_constants_add_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_add_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 1 ; ISEL-NEXT: li 3, 28 @@ -359,7 +359,7 @@ define i8 @sel_constants_add_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_add_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 1 ; NO_ISEL-NEXT: li 3, 28 @@ -375,7 +375,7 @@ define i8 @sel_constants_add_constant(i1 %cond) { define i8 @sel_constants_sub_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_sub_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -385,14 +385,14 @@ define i8 @sel_constants_sub_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_sub_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 18 ; NO_ISEL-NEXT: ori 3, 3, 65527 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -402,7 +402,7 @@ define i8 @sel_constants_sub_constant(i1 %cond) { define i8 @sel_constants_mul_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_mul_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 4, 16383 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: ori 3, 4, 65531 @@ -412,14 +412,14 @@ define i8 @sel_constants_mul_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_mul_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 4, 16383 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: ori 3, 4, 65531 ; NO_ISEL-NEXT: li 4, 115 ; NO_ISEL-NEXT: sldi 3, 3, 2 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -429,14 +429,14 @@ define i8 @sel_constants_mul_constant(i1 %cond) { define i8 @sel_constants_sdiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_sdiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 3, 4 ; ISEL-NEXT: isel 3, 0, 3, 1 ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_sdiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 3, 4 ; NO_ISEL-NEXT: bc 12, 1, .LBB24_1 @@ -451,7 +451,7 @@ define i8 @sel_constants_sdiv_constant(i1 %cond) { define i8 @sel_constants_udiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_udiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: li 4, 50 ; ISEL-NEXT: li 3, 4 @@ -459,7 +459,7 @@ define i8 @sel_constants_udiv_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_udiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 50 ; NO_ISEL-NEXT: li 3, 4 @@ -475,7 +475,7 @@ define i8 @sel_constants_udiv_constant(i1 %cond) { define i8 @sel_constants_srem_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_srem_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 4, 16383 ; ISEL-NEXT: andi. 
3, 3, 1 ; ISEL-NEXT: ori 3, 4, 65535 @@ -485,14 +485,14 @@ define i8 @sel_constants_srem_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_srem_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 4, 16383 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: ori 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 3 ; NO_ISEL-NEXT: sldi 3, 3, 2 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -502,7 +502,7 @@ define i8 @sel_constants_srem_constant(i1 %cond) { define i8 @sel_constants_urem_constant(i1 %cond) { ; ALL-LABEL: sel_constants_urem_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: rlwinm 3, 3, 0, 31, 31 ; ALL-NEXT: subfic 3, 3, 3 ; ALL-NEXT: blr @@ -513,7 +513,7 @@ define i8 @sel_constants_urem_constant(i1 %cond) { define i8 @sel_constants_and_constant(i1 %cond) { ; ALL-LABEL: sel_constants_and_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: rlwinm 3, 3, 0, 31, 31 ; ALL-NEXT: subfic 3, 3, 5 ; ALL-NEXT: blr @@ -524,7 +524,7 @@ define i8 @sel_constants_and_constant(i1 %cond) { define i8 @sel_constants_or_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_or_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -534,14 +534,14 @@ define i8 @sel_constants_or_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_or_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 23 ; NO_ISEL-NEXT: ori 3, 3, 65533 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -551,7 +551,7 @@ define i8 @sel_constants_or_constant(i1 %cond) { define i8 @sel_constants_xor_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_xor_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: li 4, 0 ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: oris 3, 4, 65535 @@ -561,14 +561,14 @@ define i8 @sel_constants_xor_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_xor_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: li 4, 0 ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: oris 3, 4, 65535 ; NO_ISEL-NEXT: li 4, 18 ; NO_ISEL-NEXT: ori 3, 3, 65529 ; NO_ISEL-NEXT: bclr 12, 1, 0 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 @@ -578,7 +578,7 @@ define i8 @sel_constants_xor_constant(i1 %cond) { define i8 @sel_constants_shl_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_shl_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: lis 5, 511 ; ISEL-NEXT: lis 4, 2047 ; ISEL-NEXT: andi. 3, 3, 1 @@ -590,7 +590,7 @@ define i8 @sel_constants_shl_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_shl_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: lis 5, 511 ; NO_ISEL-NEXT: lis 4, 2047 ; NO_ISEL-NEXT: andi. 3, 3, 1 @@ -610,7 +610,7 @@ define i8 @sel_constants_shl_constant(i1 %cond) { define i8 @sel_constants_lshr_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_lshr_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 
3, 3, 1 ; ISEL-NEXT: li 4, 7 ; ISEL-NEXT: li 3, 0 @@ -618,7 +618,7 @@ define i8 @sel_constants_lshr_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_lshr_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: li 4, 7 ; NO_ISEL-NEXT: li 3, 0 @@ -634,7 +634,7 @@ define i8 @sel_constants_lshr_constant(i1 %cond) { define i8 @sel_constants_ashr_constant(i1 %cond) { ; ALL-LABEL: sel_constants_ashr_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: clrldi 3, 3, 63 ; ALL-NEXT: neg 3, 3 ; ALL-NEXT: blr @@ -645,7 +645,7 @@ define i8 @sel_constants_ashr_constant(i1 %cond) { define double @sel_constants_fadd_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fadd_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha @@ -656,14 +656,14 @@ define double @sel_constants_fadd_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fadd_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB34_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB34_2 ; NO_ISEL-NEXT: .LBB34_2: @@ -676,7 +676,7 @@ define double @sel_constants_fadd_constant(i1 %cond) { define double @sel_constants_fsub_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fsub_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha @@ -687,14 +687,14 @@ define double @sel_constants_fsub_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fsub_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB35_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB35_2 ; NO_ISEL-NEXT: .LBB35_2: @@ -707,7 +707,7 @@ define double @sel_constants_fsub_constant(i1 %cond) { define double @sel_constants_fmul_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fmul_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha @@ -718,14 +718,14 @@ define double @sel_constants_fmul_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fmul_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB36_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB36_2 ; NO_ISEL-NEXT: .LBB36_2: @@ -738,7 +738,7 @@ define double @sel_constants_fmul_constant(i1 %cond) { define double @sel_constants_fdiv_constant(i1 %cond) { ; ISEL-LABEL: sel_constants_fdiv_constant: -; ISEL: # BB#0: +; ISEL: # %bb.0: ; ISEL-NEXT: andi. 
3, 3, 1 ; ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha ; ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha @@ -749,14 +749,14 @@ define double @sel_constants_fdiv_constant(i1 %cond) { ; ISEL-NEXT: blr ; ; NO_ISEL-LABEL: sel_constants_fdiv_constant: -; NO_ISEL: # BB#0: +; NO_ISEL: # %bb.0: ; NO_ISEL-NEXT: andi. 3, 3, 1 ; NO_ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha ; NO_ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha ; NO_ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l ; NO_ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l ; NO_ISEL-NEXT: bc 12, 1, .LBB37_2 -; NO_ISEL-NEXT: # BB#1: +; NO_ISEL-NEXT: # %bb.1: ; NO_ISEL-NEXT: ori 3, 4, 0 ; NO_ISEL-NEXT: b .LBB37_2 ; NO_ISEL-NEXT: .LBB37_2: @@ -769,10 +769,10 @@ define double @sel_constants_fdiv_constant(i1 %cond) { define double @sel_constants_frem_constant(i1 %cond) { ; ALL-LABEL: sel_constants_frem_constant: -; ALL: # BB#0: +; ALL: # %bb.0: ; ALL-NEXT: andi. 3, 3, 1 ; ALL-NEXT: bc 12, 1, .LBB38_2 -; ALL-NEXT: # BB#1: +; ALL-NEXT: # %bb.1: ; ALL-NEXT: addis 3, 2, .LCPI38_0@toc@ha ; ALL-NEXT: addi 3, 3, .LCPI38_0@toc@l ; ALL-NEXT: lxsdx 1, 0, 3 diff --git a/test/CodeGen/PowerPC/setcc-logic.ll b/test/CodeGen/PowerPC/setcc-logic.ll index 108a6bb2909..b17869f312c 100644 --- a/test/CodeGen/PowerPC/setcc-logic.ll +++ b/test/CodeGen/PowerPC/setcc-logic.ll @@ -3,7 +3,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 @@ -16,7 +16,7 @@ define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) { define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: nor 3, 3, 3 ; CHECK-NEXT: srwi 3, 3, 31 @@ -29,7 +29,7 @@ define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) { define zeroext i1 @all_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, -1 ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: xor 3, 3, 5 @@ -44,7 +44,7 @@ define zeroext i1 @all_bits_set(i32 %P, i32 %Q) { define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: srwi 3, 3, 31 ; CHECK-NEXT: blr @@ -56,7 +56,7 @@ define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) { define zeroext i1 @any_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 @@ -70,7 +70,7 @@ define zeroext i1 @any_bits_set(i32 %P, i32 %Q) { define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_set: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: srwi 3, 3, 31 ; CHECK-NEXT: blr @@ -82,7 +82,7 @@ define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) { define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 5, -1 ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: xor 3, 3, 5 @@ -98,7 +98,7 @@ define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) { define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_clear: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: nor 3, 3, 3 ; CHECK-NEXT: srwi 3, 3, 31 @@ -112,10 +112,10 @@ define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) { ; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0 define i32 @all_bits_clear_branch(i32* %P, i32* %Q) { ; 
CHECK-LABEL: all_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or. 3, 3, 4 ; CHECK-NEXT: bne 0, .LBB8_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB8_2: # %return @@ -136,11 +136,11 @@ return: define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, 0 ; CHECK-NEXT: blt 0, .LBB9_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB9_2: # %return @@ -161,11 +161,11 @@ return: define i32 @all_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bne 0, .LBB10_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB10_2: # %return @@ -186,11 +186,11 @@ return: define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: all_sign_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bgt 0, .LBB11_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB11_2: # %return @@ -212,10 +212,10 @@ return: ; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0 define i32 @any_bits_set_branch(i32* %P, i32* %Q) { ; CHECK-LABEL: any_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or. 3, 3, 4 ; CHECK-NEXT: beq 0, .LBB12_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB12_2: # %return @@ -236,11 +236,11 @@ return: define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_set_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: or 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: bgt 0, .LBB13_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB13_2: # %return @@ -261,11 +261,11 @@ return: define i32 @any_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, -1 ; CHECK-NEXT: beq 0, .LBB14_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB14_2: # %return @@ -286,11 +286,11 @@ return: define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) { ; CHECK-LABEL: any_sign_bits_clear_branch: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: and 3, 3, 4 ; CHECK-NEXT: cmpwi 0, 3, 0 ; CHECK-NEXT: blt 0, .LBB15_2 -; CHECK-NEXT: # BB#1: # %bb1 +; CHECK-NEXT: # %bb.1: # %bb1 ; CHECK-NEXT: li 3, 4 ; CHECK-NEXT: blr ; CHECK-NEXT: .LBB15_2: # %return @@ -311,7 +311,7 @@ return: define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -324,7 +324,7 @@ define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_sign_bits_clear_vec: -; CHECK: # 
BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 2, 4 @@ -337,7 +337,7 @@ define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -350,7 +350,7 @@ define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: all_sign_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 4, 2 @@ -363,7 +363,7 @@ define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -377,7 +377,7 @@ define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_sign_bits_set_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xxlxor 36, 36, 36 ; CHECK-NEXT: xxlor 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 4, 2 @@ -390,7 +390,7 @@ define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpequw 2, 2, 4 @@ -404,7 +404,7 @@ define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { ; CHECK-LABEL: any_sign_bits_clear_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: vcmpgtsw 2, 2, 4 @@ -417,7 +417,7 @@ define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) { define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) { ; CHECK-LABEL: ne_neg1_and_ne_zero: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: li 4, 1 ; CHECK-NEXT: addi 3, 3, 1 ; CHECK-NEXT: subfc 3, 3, 4 @@ -434,7 +434,7 @@ define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) { define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) { ; CHECK-LABEL: and_eq: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xor 5, 5, 6 ; CHECK-NEXT: xor 3, 3, 4 ; CHECK-NEXT: or 3, 3, 5 @@ -449,7 +449,7 @@ define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 z define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: or_ne: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: xor 5, 5, 6 ; CHECK-NEXT: xor 3, 3, 4 ; CHECK-NEXT: or 3, 3, 5 @@ -467,7 +467,7 @@ define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) { define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) { ; CHECK-LABEL: and_eq_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vcmpequw 19, 4, 5 ; CHECK-NEXT: xxland 34, 34, 51 diff --git a/test/CodeGen/PowerPC/setcc-to-sub.ll b/test/CodeGen/PowerPC/setcc-to-sub.ll index 752ebe0c9d8..a143d73c7c0 100644 --- a/test/CodeGen/PowerPC/setcc-to-sub.ll +++ b/test/CodeGen/PowerPC/setcc-to-sub.ll @@ -8,7 +8,7 @@ ; Function Attrs: norecurse nounwind readonly define zeroext i1 
@test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -30,7 +30,7 @@ entry: ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test3: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 @@ -75,7 +75,7 @@ entry: ; Function Attrs: norecurse nounwind readonly define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 { ; CHECK-LABEL: test4: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: lwz 4, 0(4) ; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28 diff --git a/test/CodeGen/PowerPC/shift_mask.ll b/test/CodeGen/PowerPC/shift_mask.ll index e9ca9b0bdf0..59382c61531 100644 --- a/test/CodeGen/PowerPC/shift_mask.ll +++ b/test/CodeGen/PowerPC/shift_mask.ll @@ -4,7 +4,7 @@ target triple = "powerpc64le-linux-gnu" define i8 @test000(i8 %a, i8 %b) { ; CHECK-LABEL: test000: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -15,7 +15,7 @@ define i8 @test000(i8 %a, i8 %b) { define i16 @test001(i16 %a, i16 %b) { ; CHECK-LABEL: test001: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -26,7 +26,7 @@ define i16 @test001(i16 %a, i16 %b) { define i32 @test002(i32 %a, i32 %b) { ; CHECK-LABEL: test002: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: slw 3, 3, 4 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ define i32 @test002(i32 %a, i32 %b) { define i64 @test003(i64 %a, i64 %b) { ; CHECK-LABEL: test003: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: sld 3, 3, 4 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ define i64 @test003(i64 %a, i64 %b) { define <16 x i8> @test010(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test010: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslb 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -58,7 +58,7 @@ define <16 x i8> @test010(<16 x i8> %a, <16 x i8> %b) { define <8 x i16> @test011(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test011: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslh 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -68,7 +68,7 @@ define <8 x i16> @test011(<8 x i16> %a, <8 x i16> %b) { define <4 x i32> @test012(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test012: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vslw 2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x i32> %b, @@ -78,7 +78,7 @@ define <4 x i32> @test012(<4 x i32> %a, <4 x i32> %b) { define <2 x i64> @test013(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test013: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsld 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, @@ -88,7 +88,7 @@ define <2 x i64> @test013(<2 x i64> %a, <2 x i64> %b) { define i8 @test100(i8 %a, i8 %b) { ; CHECK-LABEL: test100: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: 
rlwinm 3, 3, 0, 24, 31 ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: srw 3, 3, 4 @@ -100,7 +100,7 @@ define i8 @test100(i8 %a, i8 %b) { define i16 @test101(i16 %a, i16 %b) { ; CHECK-LABEL: test101: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 3, 3, 0, 16, 31 ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: srw 3, 3, 4 @@ -112,7 +112,7 @@ define i16 @test101(i16 %a, i16 %b) { define i32 @test102(i32 %a, i32 %b) { ; CHECK-LABEL: test102: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: srw 3, 3, 4 ; CHECK-NEXT: blr @@ -123,7 +123,7 @@ define i32 @test102(i32 %a, i32 %b) { define i64 @test103(i64 %a, i64 %b) { ; CHECK-LABEL: test103: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: srd 3, 3, 4 ; CHECK-NEXT: blr @@ -134,7 +134,7 @@ define i64 @test103(i64 %a, i64 %b) { define <16 x i8> @test110(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test110: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrb 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -144,7 +144,7 @@ define <16 x i8> @test110(<16 x i8> %a, <16 x i8> %b) { define <8 x i16> @test111(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test111: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrh 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -154,7 +154,7 @@ define <8 x i16> @test111(<8 x i16> %a, <8 x i16> %b) { define <4 x i32> @test112(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test112: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrw 2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x i32> %b, @@ -164,7 +164,7 @@ define <4 x i32> @test112(<4 x i32> %a, <4 x i32> %b) { define <2 x i64> @test113(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test113: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrd 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, @@ -174,7 +174,7 @@ define <2 x i64> @test113(<2 x i64> %a, <2 x i64> %b) { define i8 @test200(i8 %a, i8 %b) { ; CHECK-LABEL: test200: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: extsb 3, 3 ; CHECK-NEXT: rlwinm 4, 4, 0, 29, 31 ; CHECK-NEXT: sraw 3, 3, 4 @@ -186,7 +186,7 @@ define i8 @test200(i8 %a, i8 %b) { define i16 @test201(i16 %a, i16 %b) { ; CHECK-LABEL: test201: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: extsh 3, 3 ; CHECK-NEXT: rlwinm 4, 4, 0, 28, 31 ; CHECK-NEXT: sraw 3, 3, 4 @@ -198,7 +198,7 @@ define i16 @test201(i16 %a, i16 %b) { define i32 @test202(i32 %a, i32 %b) { ; CHECK-LABEL: test202: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31 ; CHECK-NEXT: sraw 3, 3, 4 ; CHECK-NEXT: blr @@ -209,7 +209,7 @@ define i32 @test202(i32 %a, i32 %b) { define i64 @test203(i64 %a, i64 %b) { ; CHECK-LABEL: test203: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31 ; CHECK-NEXT: srad 3, 3, 4 ; CHECK-NEXT: blr @@ -220,7 +220,7 @@ define i64 @test203(i64 %a, i64 %b) { define <16 x i8> @test210(<16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: test210: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrab 2, 2, 3 ; CHECK-NEXT: blr %rem = and <16 x i8> %b, @@ -230,7 +230,7 @@ define <16 x i8> @test210(<16 x i8> %a, <16 x i8> %b) { define <8 x i16> @test211(<8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: test211: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrah 2, 2, 3 ; CHECK-NEXT: blr %rem = and <8 x i16> %b, @@ -240,7 +240,7 @@ define <8 x i16> @test211(<8 x i16> %a, <8 x i16> %b) { define <4 x i32> @test212(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: test212: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsraw 
2, 2, 3 ; CHECK-NEXT: blr %rem = and <4 x i32> %b, @@ -250,7 +250,7 @@ define <4 x i32> @test212(<4 x i32> %a, <4 x i32> %b) { define <2 x i64> @test213(<2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: test213: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vsrad 2, 2, 3 ; CHECK-NEXT: blr %rem = and <2 x i64> %b, diff --git a/test/CodeGen/PowerPC/sjlj.ll b/test/CodeGen/PowerPC/sjlj.ll index 14aec583891..68b53417f01 100644 --- a/test/CodeGen/PowerPC/sjlj.ll +++ b/test/CodeGen/PowerPC/sjlj.ll @@ -77,7 +77,7 @@ return: ; preds = %if.end, %if.then ; CHECK: bcl 20, 31, .LBB1_3 ; CHECK: li 3, 1 ; CHECK: #EH_SjLj_Setup .LBB1_3 -; CHECK: # BB#1: +; CHECK: # %bb.1: ; CHECK: .LBB1_3: ; CHECK: mflr [[REGL:[0-9]+]] diff --git a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll index 0b101457161..3ff4753200e 100644 --- a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll +++ b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll @@ -12,17 +12,17 @@ declare void @f4() ; Function Attrs: nounwind ; CHECK-LABEL: tail_dup_fallthrough_with_branch -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %sw.0 +; CHECK: # %bb.{{[0-9]+}}: # %sw.0 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %sw.1 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %sw.default ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %if.then +; CHECK: # %bb.{{[0-9]+}}: # %if.then ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} ; CHECK: # %if.else ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll index 9665901e874..badeed5b30a 100644 --- a/test/CodeGen/PowerPC/tail-dup-layout.ll +++ b/test/CodeGen/PowerPC/tail-dup-layout.ll @@ -278,7 +278,7 @@ exit: ;CHECK: addi ;CHECK: .[[CHECKLABEL:[._0-9A-Za-z]+]]: # %for.check ;CHECK: lwz [[TAGREG:[0-9]+]], 0([[TAGPTRREG]]) -;CHECK: # %test1 +;CHECK: # %bb.{{[0-9]+}}: # %test1 ;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[._0-9A-Za-z]+]] ;CHECK-NEXT: # %test2 @@ -366,12 +366,12 @@ exit: ; code is independent of the outlining code, which works by choosing the ; "unavoidable" blocks. ; CHECK-LABEL: avoidable_test: -; CHECK: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %entry ; CHECK: andi. -; CHECK: # %test2 +; CHECK: # %bb.{{[0-9]+}}: # %test2 ; Make sure then2 falls through from test2 ; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}} -; CHECK: # %then2 +; CHECK: # %bb.{{[0-9]+}}: # %then2 ; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29 ; CHECK: # %else1 ; CHECK: bl a @@ -420,8 +420,8 @@ end1: ; The f;g->h;i trellis should be resolved as f->i;g->h. ; The h;i->j;ret trellis contains a triangle edge, and should be resolved as ; h->j->ret -; CHECK: # %entry -; CHECK: # %c10 +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %c10 ; CHECK: # %e9 ; CHECK: # %g10 ; CHECK: # %h10 @@ -504,8 +504,8 @@ ret: ; checking, it's profitable to duplicate G into F. The weights here are not ; really important. They are there to help make the test stable. ; CHECK-LABEL: trellis_then_dup_test -; CHECK: # %entry -; CHECK: # %b +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %b ; CHECK: # %d ; CHECK: # %g ; CHECK: # %ret1 @@ -568,8 +568,8 @@ ret: ; Verify that we did not mis-identify triangle trellises if it is not ; really a triangle. 
; CHECK-LABEL: trellis_no_triangle -; CHECK: # %entry -; CHECK: # %b +; CHECK: # %bb.{{[0-9]+}}: # %entry +; CHECK: # %bb.{{[0-9]+}}: # %b ; CHECK: # %d ; CHECK: # %ret ; CHECK: # %c diff --git a/test/CodeGen/PowerPC/testBitReverse.ll b/test/CodeGen/PowerPC/testBitReverse.ll index 1508af9e4d0..22fefe45468 100644 --- a/test/CodeGen/PowerPC/testBitReverse.ll +++ b/test/CodeGen/PowerPC/testBitReverse.ll @@ -4,7 +4,7 @@ declare i32 @llvm.bitreverse.i32(i32) define i32 @testBitReverseIntrinsicI32(i32 %arg) { ; CHECK-LABEL: testBitReverseIntrinsicI32: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: slwi 6, 3, 1 @@ -44,7 +44,7 @@ define i32 @testBitReverseIntrinsicI32(i32 %arg) { declare i64 @llvm.bitreverse.i64(i64) define i64 @testBitReverseIntrinsicI64(i64 %arg) { ; CHECK-LABEL: testBitReverseIntrinsicI64: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: lis 4, -21846 ; CHECK-NEXT: lis 5, 21845 ; CHECK-NEXT: lis 6, -13108 diff --git a/test/CodeGen/PowerPC/testComparesi32gtu.ll b/test/CodeGen/PowerPC/testComparesi32gtu.ll index 1d0cee72823..4341b59390e 100644 --- a/test/CodeGen/PowerPC/testComparesi32gtu.ll +++ b/test/CodeGen/PowerPC/testComparesi32gtu.ll @@ -11,7 +11,7 @@ declare signext i32 @fn2(...) local_unnamed_addr #1 ; Function Attrs: nounwind define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) { ; CHECK-LABEL: testCompare1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lbz r3, 0(r3) ; CHECK-DAG: clrlwi r3, r3, 31 ; CHECK-DAG: clrldi r3, r3, 32 @@ -35,7 +35,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: testCompare2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-DAG: rlwinm r3, r3, 0, 31, 31 ; CHECK-DAG: rlwinm r4, r4, 0, 31, 31 ; CHECK-DAG: clrldi r3, r3, 32 diff --git a/test/CodeGen/PowerPC/testComparesi32leu.ll b/test/CodeGen/PowerPC/testComparesi32leu.ll index 65a75dacbeb..3ba967b51da 100644 --- a/test/CodeGen/PowerPC/testComparesi32leu.ll +++ b/test/CodeGen/PowerPC/testComparesi32leu.ll @@ -8,7 +8,7 @@ define signext i32 @test(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rlwinm r3, r3, 0, 31, 31 ; CHECK-NEXT: rlwinm r4, r4, 0, 31, 31 ; CHECK-NEXT: clrldi r3, r3, 32 diff --git a/test/CodeGen/PowerPC/testComparesi32ltu.ll b/test/CodeGen/PowerPC/testComparesi32ltu.ll index fb6b3f88bb5..9623a63e9bc 100644 --- a/test/CodeGen/PowerPC/testComparesi32ltu.ll +++ b/test/CodeGen/PowerPC/testComparesi32ltu.ll @@ -11,7 +11,7 @@ declare signext i32 @fn2(...) 
local_unnamed_addr #1 ; Function Attrs: nounwind define i32 @testCompare1(%struct.tree_common* nocapture readonly %arg1) { ; CHECK-LABEL: testCompare1: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: lbz r3, 0(r3) ; CHECK-DAG: clrlwi r3, r3, 31 ; CHECK-DAG: clrldi r3, r3, 32 @@ -35,7 +35,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @testCompare2(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: testCompare2: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-DAG: rlwinm r3, r3, 0, 31, 31 ; CHECK-DAG: rlwinm r4, r4, 0, 31, 31 ; CHECK-DAG: clrldi r3, r3, 32 diff --git a/test/CodeGen/PowerPC/testComparesieqsc.ll b/test/CodeGen/PowerPC/testComparesieqsc.ll index e65abd317f4..aa0211ebb65 100644 --- a/test/CodeGen/PowerPC/testComparesieqsc.ll +++ b/test/CodeGen/PowerPC/testComparesieqsc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_z(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsc_sext_z(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ieqsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsc_sext_z_store(i8 signext %a) { ; CHECK-LABEL: test_ieqsc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesieqsi.ll b/test/CodeGen/PowerPC/testComparesieqsi.ll index 81b28ac6fe5..0a6b7b9ca35 100644 --- a/test/CodeGen/PowerPC/testComparesieqsi.ll +++ b/test/CodeGen/PowerPC/testComparesieqsi.ll @@ -12,7 +12,7 @@ ; 
Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_z(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ieqsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsi_sext_z_store(i32 signext %a) { ; CHECK-LABEL: test_ieqsi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesieqsll.ll b/test/CodeGen/PowerPC/testComparesieqsll.ll index bedd0ed9c97..1dae985c36c 100644 --- a/test/CodeGen/PowerPC/testComparesieqsll.ll +++ b/test/CodeGen/PowerPC/testComparesieqsll.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_z(i64 %a) { ; CHECK-LABEL: test_ieqsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ 
-53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqsll_sext_z(i64 %a) { ; CHECK-LABEL: test_ieqsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -66,7 +66,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -84,7 +84,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ieqsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -102,7 +102,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsll_z_store(i64 %a) { ; CHECK-LABEL: test_ieqsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqsll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ieqsll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesieqss.ll b/test/CodeGen/PowerPC/testComparesieqss.ll index 66a1fa814b9..93a92e17807 100644 --- a/test/CodeGen/PowerPC/testComparesieqss.ll +++ b/test/CodeGen/PowerPC/testComparesieqss.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_z(i16 signext %a) { ; CHECK-LABEL: test_ieqss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_ieqss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_ieqss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_ieqss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, 
.LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqss_z_store(i16 signext %a) { ; CHECK-LABEL: test_ieqss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_ieqss_sext_z_store(i16 signext %a) { ; CHECK-LABEL: test_ieqss_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesiequc.ll b/test/CodeGen/PowerPC/testComparesiequc.ll index 2616ab56d8d..592f7bc83bb 100644 --- a/test/CodeGen/PowerPC/testComparesiequc.ll +++ b/test/CodeGen/PowerPC/testComparesiequc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_z(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_iequc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_iequc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesiequi.ll b/test/CodeGen/PowerPC/testComparesiequi.ll index a4a1b7635e8..9a639dc5410 100644 --- a/test/CodeGen/PowerPC/testComparesiequi.ll +++ 
b/test/CodeGen/PowerPC/testComparesiequi.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_z(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequi_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequi_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequi_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_iequi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequi_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequi_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_iequi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesiequll.ll b/test/CodeGen/PowerPC/testComparesiequll.ll index 4d9035813a8..f147478d5ea 100644 --- a/test/CodeGen/PowerPC/testComparesiequll.ll +++ b/test/CodeGen/PowerPC/testComparesiequll.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll_z(i64 %a) { ; CHECK-LABEL: test_iequll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd 
r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequll_sext_z(i64 %a) { ; CHECK-LABEL: test_iequll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -66,7 +66,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -84,7 +84,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iequll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -102,7 +102,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequll_z_store(i64 %a) { ; CHECK-LABEL: test_iequll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_iequll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesiequs.ll b/test/CodeGen/PowerPC/testComparesiequs.ll index 5d47c38f739..195339ddb2e 100644 --- a/test/CodeGen/PowerPC/testComparesiequs.ll +++ b/test/CodeGen/PowerPC/testComparesiequs.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_z(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iequs_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequs_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequs_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_iequs_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequs_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iequs_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_iequs_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesigesc.ll b/test/CodeGen/PowerPC/testComparesigesc.ll index 130127bf351..69dd97fc9c4 100644 --- a/test/CodeGen/PowerPC/testComparesigesc.ll +++ b/test/CodeGen/PowerPC/testComparesigesc.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_igesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_igesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_igesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesigesi.ll b/test/CodeGen/PowerPC/testComparesigesi.ll index 018fb940a2c..7efc8ae8245 100644 --- a/test/CodeGen/PowerPC/testComparesigesi.ll +++ b/test/CodeGen/PowerPC/testComparesigesi.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_igesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_igesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_igesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesigesll.ll b/test/CodeGen/PowerPC/testComparesigesll.ll index 8ce71c0fd9c..30efe3da3e9 100644 --- 
a/test/CodeGen/PowerPC/testComparesigesll.ll +++ b/test/CodeGen/PowerPC/testComparesigesll.ll @@ -9,7 +9,7 @@ define signext i32 @test_igesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -23,7 +23,7 @@ entry: define signext i32 @test_igesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -38,7 +38,7 @@ entry: define signext i32 @test_igesll_z(i64 %a) { ; CHECK-LABEL: test_igesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ entry: define signext i32 @test_igesll_sext_z(i64 %a) { ; CHECK-LABEL: test_igesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: not r3, r3 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ entry: define void @test_igesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r3, 63 ; CHECK: subfc r3, r4, r3 ; CHECK: rldicl r3, r4, 1, 63 @@ -78,7 +78,7 @@ entry: define void @test_igesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r6, r3, 63 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: subfc r3, r4, r3 @@ -97,7 +97,7 @@ entry: define void @test_igesll_z_store(i64 %a) { ; CHECK-LABEL: test_igesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -113,7 +113,7 @@ entry: define void @test_igesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_igesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesigess.ll b/test/CodeGen/PowerPC/testComparesigess.ll index 8773d423a58..231a26c916d 100644 --- a/test/CodeGen/PowerPC/testComparesigess.ll +++ b/test/CodeGen/PowerPC/testComparesigess.ll @@ -9,7 +9,7 @@ define signext i32 @test_igess(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_igess_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_igess_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_igess_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igess_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesigtsc.ll 
b/test/CodeGen/PowerPC/testComparesigtsc.ll index 9af61599398..8009043c45d 100644 --- a/test/CodeGen/PowerPC/testComparesigtsc.ll +++ b/test/CodeGen/PowerPC/testComparesigtsc.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsc_z(i8 signext %a) { ; CHECK-LABEL: test_igtsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -75,7 +75,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_igtsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -89,7 +89,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_igtsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesigtsi.ll b/test/CodeGen/PowerPC/testComparesigtsi.ll index f7a32c9c49b..77dfc3583f1 100644 --- a/test/CodeGen/PowerPC/testComparesigtsi.ll +++ b/test/CodeGen/PowerPC/testComparesigtsi.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsi_z(i32 signext %a) { ; CHECK-LABEL: test_igtsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -75,7 +75,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsi_sext_store(i32 
signext %a, i32 signext %b) { ; CHECK-LABEL: test_igtsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -89,7 +89,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_igtsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesigtsll.ll b/test/CodeGen/PowerPC/testComparesigtsll.ll index bd681f9e168..75314d708f5 100644 --- a/test/CodeGen/PowerPC/testComparesigtsll.ll +++ b/test/CodeGen/PowerPC/testComparesigtsll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -44,7 +44,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtsll_z(i64 %a) { ; CHECK-LABEL: test_igtsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: nor r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -70,7 +70,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -87,7 +87,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_igtsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtsll_z_store(i64 %a) { ; CHECK-LABEL: test_igtsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesigtss.ll b/test/CodeGen/PowerPC/testComparesigtss.ll index 65ea0b58e78..23ddbe30f7e 100644 --- a/test/CodeGen/PowerPC/testComparesigtss.ll +++ b/test/CodeGen/PowerPC/testComparesigtss.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG1:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG1]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 
; CHECK-NEXT: blr @@ -37,7 +37,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_z(i16 signext %a) { ; CHECK-LABEL: test_igtss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_igtss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_igtss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: neg [[REG2:r[0-9]+]], r3 ; CHECK-NEXT: sradi r3, [[REG2]], 63 ; CHECK-NEXT: blr @@ -63,7 +63,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG1:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG1]], 1, 63 entry: @@ -76,7 +76,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_igtss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -90,7 +90,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_igtss_z_store(i16 signext %a) { ; CHECK-LABEL: test_igtss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: neg r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesilesc.ll b/test/CodeGen/PowerPC/testComparesilesc.ll index b932867ef31..422dc3adc5d 100644 --- a/test/CodeGen/PowerPC/testComparesilesc.ll +++ b/test/CodeGen/PowerPC/testComparesilesc.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_ilesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_ilesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_ilesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_ilesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesilesi.ll b/test/CodeGen/PowerPC/testComparesilesi.ll index 250cbc704c1..72439bd9aa3 100644 --- a/test/CodeGen/PowerPC/testComparesilesi.ll +++ b/test/CodeGen/PowerPC/testComparesilesi.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_ilesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 
; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_ilesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_ilesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_ilesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesilesll.ll b/test/CodeGen/PowerPC/testComparesilesll.ll index 8c23dcd95cd..21b67664c30 100644 --- a/test/CodeGen/PowerPC/testComparesilesll.ll +++ b/test/CodeGen/PowerPC/testComparesilesll.ll @@ -9,7 +9,7 @@ define signext i32 @test_ilesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -23,7 +23,7 @@ entry: define signext i32 @test_ilesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -38,7 +38,7 @@ entry: define signext i32 @test_ilesll_z(i64 %a) { ; CHECK-LABEL: test_ilesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -51,7 +51,7 @@ entry: define signext i32 @test_ilesll_sext_z(i64 %a) { ; CHECK-LABEL: test_ilesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: sradi r3, r3, 63 @@ -64,7 +64,7 @@ entry: define void @test_ilesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK: subfc r4, r3, r4 ; CHECK: rldicl r3, r3, 1, 63 @@ -80,7 +80,7 @@ entry: define void @test_ilesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ilesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK-DAG: rldicl r3, r3, 1, 63 ; CHECK-DAG: subfc r4, r3, r4 @@ -97,7 +97,7 @@ entry: define void @test_ilesll_z_store(i64 %a) { ; CHECK-LABEL: test_ilesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -114,7 +114,7 @@ entry: define void @test_ilesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ilesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesiless.ll b/test/CodeGen/PowerPC/testComparesiless.ll index 5e4a455990d..c85ff6078e7 100644 --- a/test/CodeGen/PowerPC/testComparesiless.ll +++ b/test/CodeGen/PowerPC/testComparesiless.ll @@ -9,7 +9,7 @@ define signext i32 @test_iless(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define signext i32 @test_iless_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: 
test_iless_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_iless_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_iless_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iless_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesiltsc.ll b/test/CodeGen/PowerPC/testComparesiltsc.ll index d4f267cc12a..08a023302bd 100644 --- a/test/CodeGen/PowerPC/testComparesiltsc.ll +++ b/test/CodeGen/PowerPC/testComparesiltsc.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -61,7 +61,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_iltsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: diff --git a/test/CodeGen/PowerPC/testComparesiltsi.ll b/test/CodeGen/PowerPC/testComparesiltsi.ll index 191afd20eaa..39f37387f53 100644 --- a/test/CodeGen/PowerPC/testComparesiltsi.ll +++ b/test/CodeGen/PowerPC/testComparesiltsi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_iltsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: srawi r3, r3, 31 ; CHECK-NEXT: blr entry: @@ -49,7 +49,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # 
%bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -62,7 +62,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_iltsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: diff --git a/test/CodeGen/PowerPC/testComparesiltsll.ll b/test/CodeGen/PowerPC/testComparesiltsll.ll index a0452954917..4152b8556df 100644 --- a/test/CodeGen/PowerPC/testComparesiltsll.ll +++ b/test/CodeGen/PowerPC/testComparesiltsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -27,7 +27,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -44,7 +44,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltsll_sext_z(i64 %a) { ; CHECK-LABEL: test_iltsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: blr entry: @@ -56,7 +56,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 @@ -73,7 +73,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_iltsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 diff --git a/test/CodeGen/PowerPC/testComparesiltss.ll b/test/CodeGen/PowerPC/testComparesiltss.ll index 4d66fad13ad..db5a60dfb66 100644 --- a/test/CodeGen/PowerPC/testComparesiltss.ll +++ b/test/CodeGen/PowerPC/testComparesiltss.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -24,7 +24,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define signext i32 @test_iltss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_iltss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -61,7 +61,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_iltss_sext_store(i16 signext %a, 
i16 signext %b) { ; CHECK-LABEL: test_iltss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: diff --git a/test/CodeGen/PowerPC/testComparesinesll.ll b/test/CodeGen/PowerPC/testComparesinesll.ll index 5f49d2290a6..cccff24c504 100644 --- a/test/CodeGen/PowerPC/testComparesinesll.ll +++ b/test/CodeGen/PowerPC/testComparesinesll.ll @@ -10,7 +10,7 @@ define signext i32 @test_inesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ entry: define signext i32 @test_inesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ entry: define signext i32 @test_inesll_z(i64 %a) { ; CHECK-LABEL: test_inesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: define signext i32 @test_inesll_sext_z(i64 %a) { ; CHECK-LABEL: test_inesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ entry: define void @test_inesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ entry: define void @test_inesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_inesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ entry: define void @test_inesll_z_store(i64 %a) { ; CHECK-LABEL: test_inesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ entry: define void @test_inesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_inesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesineuc.ll b/test/CodeGen/PowerPC/testComparesineuc.ll index 3f99fbcd212..c478041b19e 100644 --- a/test/CodeGen/PowerPC/testComparesineuc.ll +++ b/test/CodeGen/PowerPC/testComparesineuc.ll @@ -9,7 +9,7 @@ define signext i32 @test_ineuc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -23,7 +23,7 @@ entry: define signext i32 @test_ineuc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -38,7 +38,7 @@ entry: define signext i32 @test_ineuc_z(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -51,7 +51,7 @@ entry: define signext i32 
@test_ineuc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -65,7 +65,7 @@ entry: define void @test_ineuc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -83,7 +83,7 @@ entry: define void @test_ineuc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_ineuc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -102,7 +102,7 @@ entry: define void @test_ineuc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -119,7 +119,7 @@ entry: define void @test_ineuc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_ineuc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 diff --git a/test/CodeGen/PowerPC/testComparesineull.ll b/test/CodeGen/PowerPC/testComparesineull.ll index 6d645f5d33b..ba388a45fad 100644 --- a/test/CodeGen/PowerPC/testComparesineull.ll +++ b/test/CodeGen/PowerPC/testComparesineull.ll @@ -10,7 +10,7 @@ define signext i32 @test_ineull(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ entry: define signext i32 @test_ineull_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ entry: define signext i32 @test_ineull_z(i64 %a) { ; CHECK-LABEL: test_ineull_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: define signext i32 @test_ineull_sext_z(i64 %a) { ; CHECK-LABEL: test_ineull_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ entry: define void @test_ineull_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ entry: define void @test_ineull_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_ineull_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ entry: define void @test_ineull_z_store(i64 %a) { ; CHECK-LABEL: test_ineull_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ entry: define void @test_ineull_sext_z_store(i64 %a) { ; CHECK-LABEL: test_ineull_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; 
CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesineus.ll b/test/CodeGen/PowerPC/testComparesineus.ll index 38c62f7dc3f..a78671b6407 100644 --- a/test/CodeGen/PowerPC/testComparesineus.ll +++ b/test/CodeGen/PowerPC/testComparesineus.ll @@ -10,7 +10,7 @@ define signext i32 @test_ineus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -24,7 +24,7 @@ entry: define signext i32 @test_ineus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -39,7 +39,7 @@ entry: define signext i32 @test_ineus_z(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -52,7 +52,7 @@ entry: define signext i32 @test_ineus_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -66,7 +66,7 @@ entry: define void @test_ineus_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -84,7 +84,7 @@ entry: define void @test_ineus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_ineus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -103,7 +103,7 @@ entry: define void @test_ineus_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -120,7 +120,7 @@ entry: define void @test_ineus_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_ineus_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 diff --git a/test/CodeGen/PowerPC/testCompareslleqsc.ll b/test/CodeGen/PowerPC/testCompareslleqsc.ll index 8559665f53d..43fb358efef 100644 --- a/test/CodeGen/PowerPC/testCompareslleqsc.ll +++ b/test/CodeGen/PowerPC/testCompareslleqsc.ll @@ -12,7 +12,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_z(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -54,7 +54,7 @@ entry: ; 
Function Attrs: norecurse nounwind readnone define i64 @test_lleqsc_sext_z(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -68,7 +68,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lleqsc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsc_z_store(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsc_sext_z_store(i8 signext %a) { ; CHECK-LABEL: test_lleqsc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testCompareslleqsi.ll b/test/CodeGen/PowerPC/testCompareslleqsi.ll index 131c088a8ab..ae8dffb1e22 100644 --- a/test/CodeGen/PowerPC/testCompareslleqsi.ll +++ b/test/CodeGen/PowerPC/testCompareslleqsi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_z(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsi_sext_z(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lleqsi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; 
CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsi_z_store(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsi_sext_z_store(i32 signext %a) { ; CHECK-LABEL: test_lleqsi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testCompareslleqsll.ll b/test/CodeGen/PowerPC/testCompareslleqsll.ll index 8484586c160..89ef960a6f9 100644 --- a/test/CodeGen/PowerPC/testCompareslleqsll.ll +++ b/test/CodeGen/PowerPC/testCompareslleqsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -39,7 +39,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_z(i64 %a) { ; CHECK-LABEL: test_lleqsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -52,7 +52,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqsll_sext_z(i64 %a) { ; CHECK-LABEL: test_lleqsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -65,7 +65,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -83,7 +83,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lleqsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -101,7 +101,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsll_z_store(i64 %a) { ; CHECK-LABEL: test_lleqsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -118,7 +118,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqsll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_lleqsll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testCompareslleqss.ll b/test/CodeGen/PowerPC/testCompareslleqss.ll index 1eac65f268c..5d1945d73e3 100644 --- a/test/CodeGen/PowerPC/testCompareslleqss.ll +++ b/test/CodeGen/PowerPC/testCompareslleqss.ll @@ -11,7 +11,7 @@ ; Function 
Attrs: norecurse nounwind readnone define i64 @test_lleqss(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_z(i16 signext %a) { ; CHECK-LABEL: test_lleqss_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lleqss_sext_z(i16 signext %a) { ; CHECK-LABEL: test_lleqss_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqss_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqss_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_lleqss_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqss_z_store(i16 signext %a) { ; CHECK-LABEL: test_lleqss_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lleqss_sext_z_store(i16 signext %a) { ; CHECK-LABEL: test_lleqss_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllequc.ll b/test/CodeGen/PowerPC/testComparesllequc.ll index a1733b86516..0f5d4c6f287 100644 --- a/test/CodeGen/PowerPC/testComparesllequc.ll +++ b/test/CodeGen/PowerPC/testComparesllequc.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequc_z(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function 
Attrs: norecurse nounwind readnone define i64 @test_llequc_sext_z(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llequc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequc_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequc_sext_z_store(i8 zeroext %a) { ; CHECK-LABEL: test_llequc_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllequi.ll b/test/CodeGen/PowerPC/testComparesllequi.ll index ab3176d49ac..350168e0e6c 100644 --- a/test/CodeGen/PowerPC/testComparesllequi.ll +++ b/test/CodeGen/PowerPC/testComparesllequi.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_z(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequi_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequi_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequi_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llequi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; 
CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequi_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequi_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llequi_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllequll.ll b/test/CodeGen/PowerPC/testComparesllequll.ll index 8ca9767e879..7d1fe527e8a 100644 --- a/test/CodeGen/PowerPC/testComparesllequll.ll +++ b/test/CodeGen/PowerPC/testComparesllequll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 @@ -39,7 +39,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_z(i64 %a) { ; CHECK-LABEL: test_llequll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: rldicl r3, r3, 58, 63 ; CHECK-NEXT: blr @@ -52,7 +52,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequll_sext_z(i64 %a) { ; CHECK-LABEL: test_llequll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -65,7 +65,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -83,7 +83,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llequll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -101,7 +101,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequll_z_store(i64 %a) { ; CHECK-LABEL: test_llequll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzd r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -118,7 +118,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llequll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r3, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllequs.ll b/test/CodeGen/PowerPC/testComparesllequs.ll index 0cc2b43bdff..cc215216dfc 100644 --- a/test/CodeGen/PowerPC/testComparesllequs.ll +++ b/test/CodeGen/PowerPC/testComparesllequs.ll @@ -11,7 +11,7 @@ ; Function 
Attrs: norecurse nounwind readnone define i64 @test_llequs(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 @@ -40,7 +40,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs_z(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: blr @@ -53,7 +53,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llequs_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: neg r3, r3 @@ -67,7 +67,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequs_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequs_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llequs_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequs_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -121,7 +121,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llequs_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llequs_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllgesc.ll b/test/CodeGen/PowerPC/testComparesllgesc.ll index 744ef362abf..82f54cd6b1b 100644 --- a/test/CodeGen/PowerPC/testComparesllgesc.ll +++ b/test/CodeGen/PowerPC/testComparesllgesc.ll @@ -9,7 +9,7 @@ define i64 @test_llgesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define i64 @test_llgesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_llgesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_llgesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_llgesc_sext_store(i8 signext %a, i8 signext %b) { ; 
CHECK-LABEL: test_llgesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesllgesi.ll b/test/CodeGen/PowerPC/testComparesllgesi.ll index eec3e671b8f..82c1fa11b8b 100644 --- a/test/CodeGen/PowerPC/testComparesllgesi.ll +++ b/test/CodeGen/PowerPC/testComparesllgesi.ll @@ -9,7 +9,7 @@ define i64 @test_llgesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define i64 @test_llgesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_llgesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_llgesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_llgesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesllgesll.ll b/test/CodeGen/PowerPC/testComparesllgesll.ll index 9fa5985ba6e..6fb53977a55 100644 --- a/test/CodeGen/PowerPC/testComparesllgesll.ll +++ b/test/CodeGen/PowerPC/testComparesllgesll.ll @@ -9,7 +9,7 @@ define i64 @test_llgesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -23,7 +23,7 @@ entry: define i64 @test_llgesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r3, 63 ; CHECK-NEXT: rldicl r6, r4, 1, 63 ; CHECK-NEXT: subfc r3, r4, r3 @@ -38,7 +38,7 @@ entry: define i64 @test_llgesll_z(i64 %a) { ; CHECK-LABEL: test_llgesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 ; CHECK-NEXT: blr @@ -50,7 +50,7 @@ entry: define i64 @test_llgesll_sext_z(i64 %a) { ; CHECK-LABEL: test_llgesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: not r3, r3 ; CHECK-NEXT: blr @@ -62,7 +62,7 @@ entry: define void @test_llgesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r3, 63 ; CHECK: subfc r3, r4, r3 ; CHECK: rldicl r3, r4, 1, 63 @@ -78,7 +78,7 @@ entry: define void @test_llgesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r6, r3, 63 ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: subfc r3, r4, r3 @@ -97,7 +97,7 @@ entry: define void @test_llgesll_z_store(i64 %a) { ; CHECK-LABEL: test_llgesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -113,7 +113,7 @@ 
entry: define void @test_llgesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llgesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllgess.ll b/test/CodeGen/PowerPC/testComparesllgess.ll index 04b07b2200e..1206339a23b 100644 --- a/test/CodeGen/PowerPC/testComparesllgess.ll +++ b/test/CodeGen/PowerPC/testComparesllgess.ll @@ -9,7 +9,7 @@ define i64 @test_llgess(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -22,7 +22,7 @@ entry: define i64 @test_llgess_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -35,7 +35,7 @@ entry: define void @test_llgess_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -52,7 +52,7 @@ entry: define void @test_llgess_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llgess_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesllgtsll.ll b/test/CodeGen/PowerPC/testComparesllgtsll.ll index 2467468afac..0dc1374374f 100644 --- a/test/CodeGen/PowerPC/testComparesllgtsll.ll +++ b/test/CodeGen/PowerPC/testComparesllgtsll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -26,7 +26,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r3, r4 @@ -44,7 +44,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtsll_z(i64 %a) { ; CHECK-LABEL: test_llgtsll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: nor r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -70,7 +70,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -87,7 +87,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llgtsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r4, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r3, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r3, r4 @@ -105,7 +105,7 @@ entry: ; Function Attrs: norecurse nounwind define void 
@test_llgtsll_z_store(i64 %a) { ; CHECK-LABEL: test_llgtsll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllgtus.ll b/test/CodeGen/PowerPC/testComparesllgtus.ll index 8d06b4b3790..3758e8e097c 100644 --- a/test/CodeGen/PowerPC/testComparesllgtus.ll +++ b/test/CodeGen/PowerPC/testComparesllgtus.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r4, r3 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -36,7 +36,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_z(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -50,7 +50,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llgtus_sext_z(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 ; CHECK-NEXT: xori r3, r3, 1 @@ -65,7 +65,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtus_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -78,7 +78,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llgtus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r4, r3 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: @@ -91,7 +91,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtus_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -109,7 +109,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llgtus_sext_z_store(i16 zeroext %a) { ; CHECK-LABEL: test_llgtus_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: cntlzw r3, r3 ; CHECK-NEXT: srwi r3, r3, 5 diff --git a/test/CodeGen/PowerPC/testCompareslllesc.ll b/test/CodeGen/PowerPC/testCompareslllesc.ll index cb564cc721b..f9352990f2c 100644 --- a/test/CodeGen/PowerPC/testCompareslllesc.ll +++ b/test/CodeGen/PowerPC/testCompareslllesc.ll @@ -10,7 +10,7 @@ define i64 @test_lllesc(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ entry: define i64 @test_lllesc_sext(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ entry: define void @test_lllesc_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ entry: define void @test_lllesc_sext_store(i8 signext %a, i8 signext %b) { ; CHECK-LABEL: test_lllesc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testCompareslllesi.ll b/test/CodeGen/PowerPC/testCompareslllesi.ll index d39f61fbd88..42062692a08 100644 --- a/test/CodeGen/PowerPC/testCompareslllesi.ll +++ b/test/CodeGen/PowerPC/testCompareslllesi.ll @@ -10,7 +10,7 @@ define i64 @test_lllesi(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ entry: define i64 @test_lllesi_sext(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ entry: define void @test_lllesi_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ entry: define void @test_lllesi_sext_store(i32 signext %a, i32 signext %b) { ; CHECK-LABEL: test_lllesi_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testCompareslllesll.ll b/test/CodeGen/PowerPC/testCompareslllesll.ll index 375bd2295a7..8db1ee19ebb 100644 --- a/test/CodeGen/PowerPC/testCompareslllesll.ll +++ b/test/CodeGen/PowerPC/testCompareslllesll.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -25,7 +25,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi r5, r4, 63 ; CHECK-NEXT: rldicl r6, r3, 1, 63 ; CHECK-NEXT: subfc r12, r3, r4 @@ -41,7 +41,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_z(i64 %a) { ; CHECK-LABEL: test_lllesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 @@ -55,7 +55,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_lllesll_sext_z(i64 %a) { ; CHECK-LABEL: test_lllesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi r4, r3, -1 ; CHECK-NEXT: or r3, r4, r3 ; CHECK-NEXT: sradi r3, r3, 63 @@ -69,7 +69,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lllesll_store(i64 %a, i64 %b) { ; 
CHECK-LABEL: test_lllesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK: subfc r4, r3, r4 ; CHECK: rldicl r3, r3, 1, 63 @@ -86,7 +86,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lllesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_lllesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi r6, r4, 63 ; CHECK-DAG: rldicl r3, r3, 1, 63 ; CHECK-DAG: subfc r4, r3, r4 @@ -104,7 +104,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lllesll_z_store(i64 %a) { ; CHECK-LABEL: test_lllesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -122,7 +122,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_lllesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_lllesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addi r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllless.ll b/test/CodeGen/PowerPC/testComparesllless.ll index 4971fb75900..a6f3b5e3988 100644 --- a/test/CodeGen/PowerPC/testComparesllless.ll +++ b/test/CodeGen/PowerPC/testComparesllless.ll @@ -10,7 +10,7 @@ define i64 @test_llless(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: xori r3, r3, 1 @@ -23,7 +23,7 @@ entry: define i64 @test_llless_sext(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: rldicl r3, r3, 1, 63 ; CHECK-NEXT: addi r3, r3, -1 @@ -36,7 +36,7 @@ entry: define void @test_llless_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -53,7 +53,7 @@ entry: define void @test_llless_sext_store(i16 signext %a, i16 signext %b) { ; CHECK-LABEL: test_llless_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: sub r3, r4, r3 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) diff --git a/test/CodeGen/PowerPC/testComparesllltsll.ll b/test/CodeGen/PowerPC/testComparesllltsll.ll index 887c14faf7b..3e37daf046f 100644 --- a/test/CodeGen/PowerPC/testComparesllltsll.ll +++ b/test/CodeGen/PowerPC/testComparesllltsll.ll @@ -11,7 +11,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -27,7 +27,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK-NEXT: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-NEXT: subfc [[REG3:r[0-9]+]], r4, r3 @@ -44,7 +44,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltsll_sext_z(i64 %a) { ; CHECK-LABEL: test_llltsll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry 
; CHECK-NEXT: sradi r3, r3, 63 ; CHECK-NEXT: blr entry: @@ -56,7 +56,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltsll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 @@ -73,7 +73,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltsll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llltsll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sradi [[REG1:r[0-9]+]], r3, 63 ; CHECK: rldicl [[REG2:r[0-9]+]], r4, 1, 63 ; CHECK-DIAG: subfc [[REG3:r[0-9]+]], r4, r3 diff --git a/test/CodeGen/PowerPC/testComparesllltuc.ll b/test/CodeGen/PowerPC/testComparesllltuc.ll index a02452554b5..a8244e757b1 100644 --- a/test/CodeGen/PowerPC/testComparesllltuc.ll +++ b/test/CodeGen/PowerPC/testComparesllltuc.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltuc(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltuc_sext(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -36,7 +36,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltuc_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[2-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 entry: @@ -49,7 +49,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltuc_sext_store(i8 zeroext %a, i8 zeroext %b) { ; CHECK-LABEL: test_llltuc_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: diff --git a/test/CodeGen/PowerPC/testComparesllltui.ll b/test/CodeGen/PowerPC/testComparesllltui.ll index bea180168da..e785942b3c9 100644 --- a/test/CodeGen/PowerPC/testComparesllltui.ll +++ b/test/CodeGen/PowerPC/testComparesllltui.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 @@ -24,7 +24,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_sext(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -37,7 +37,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_z(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li r3, 0 ; CHECK-NEXT: blr entry: @@ -47,7 +47,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltui_sext_z(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li r3, 0 ; CHECK-NEXT: blr 
entry: @@ -57,7 +57,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltui_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK: sub [[REG:r[2-9]+]], r3, r4 ; CHECK: rldicl {{r[0-9]+}}, [[REG]], 1, 63 @@ -71,7 +71,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltui_sext_store(i32 zeroext %a, i32 zeroext %b) { ; CHECK-LABEL: test_llltui_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NOT: clrldi ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 @@ -85,7 +85,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltui_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: li [[REG:r[0-9]+]], 0 ; CHECK: stw [[REG]], 0(r3) ; CHECK-NEXT: blr @@ -97,7 +97,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltui_sext_z_store(i32 zeroext %a) { ; CHECK-LABEL: test_llltui_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: li [[REG:r[0-9]+]], 0 ; CHECK: stw [[REG]], 0(r3) ; CHECK-NEXT: blr diff --git a/test/CodeGen/PowerPC/testComparesllltus.ll b/test/CodeGen/PowerPC/testComparesllltus.ll index 713bc220442..e997d0aa8b8 100644 --- a/test/CodeGen/PowerPC/testComparesllltus.ll +++ b/test/CodeGen/PowerPC/testComparesllltus.ll @@ -10,7 +10,7 @@ ; Function Attrs: norecurse nounwind readnone define i64 @test_llltus(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: rldicl r3, [[REG]], 1, 63 ; CHECK-NEXT: blr @@ -23,7 +23,7 @@ entry: ; Function Attrs: norecurse nounwind readnone define i64 @test_llltus_sext(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: sub [[REG:r[0-9]+]], r3, r4 ; CHECK-NEXT: sradi r3, [[REG]], 63 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: ; Function Attrs: norecurse nounwind define void @test_llltus_sext_store(i16 zeroext %a, i16 zeroext %b) { ; CHECK-LABEL: test_llltus_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK: sub [[REG:r[0-9]+]], r3, r4 ; CHECK: sradi {{r[0-9]+}}, [[REG]], 63 entry: diff --git a/test/CodeGen/PowerPC/testComparesllnesll.ll b/test/CodeGen/PowerPC/testComparesllnesll.ll index 6b2b662dcc7..cdd272f57bd 100644 --- a/test/CodeGen/PowerPC/testComparesllnesll.ll +++ b/test/CodeGen/PowerPC/testComparesllnesll.ll @@ -10,7 +10,7 @@ define i64 @test_llnesll(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ entry: define i64 @test_llnesll_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ entry: define i64 @test_llnesll_z(i64 %a) { ; CHECK-LABEL: test_llnesll_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: define i64 @test_llnesll_sext_z(i64 %a) { ; CHECK-LABEL: test_llnesll_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: 
subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ entry: define void @test_llnesll_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ entry: define void @test_llnesll_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llnesll_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ entry: define void @test_llnesll_z_store(i64 %a) { ; CHECK-LABEL: test_llnesll_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ entry: define void @test_llnesll_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llnesll_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/testComparesllneull.ll b/test/CodeGen/PowerPC/testComparesllneull.ll index 0cf47e4ac03..7956881f495 100644 --- a/test/CodeGen/PowerPC/testComparesllneull.ll +++ b/test/CodeGen/PowerPC/testComparesllneull.ll @@ -10,7 +10,7 @@ define i64 @test_llneull(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 @@ -23,7 +23,7 @@ entry: define i64 @test_llneull_sext(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_sext: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 @@ -36,7 +36,7 @@ entry: define i64 @test_llneull_z(i64 %a) { ; CHECK-LABEL: test_llneull_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addic r4, r3, -1 ; CHECK-NEXT: subfe r3, r4, r3 ; CHECK-NEXT: blr @@ -48,7 +48,7 @@ entry: define i64 @test_llneull_sext_z(i64 %a) { ; CHECK-LABEL: test_llneull_sext_z: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: subfe r3, r3, r3 ; CHECK-NEXT: blr @@ -60,7 +60,7 @@ entry: define void @test_llneull_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -77,7 +77,7 @@ entry: define void @test_llneull_sext_store(i64 %a, i64 %b) { ; CHECK-LABEL: test_llneull_sext_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r5, r2, .LC0@toc@ha ; CHECK-NEXT: xor r3, r3, r4 ; CHECK-NEXT: ld r12, .LC0@toc@l(r5) @@ -94,7 +94,7 @@ entry: define void @test_llneull_z_store(i64 %a) { ; CHECK-LABEL: test_llneull_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: addic r5, r3, -1 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) @@ -110,7 +110,7 @@ entry: define void @test_llneull_sext_z_store(i64 %a) { ; CHECK-LABEL: test_llneull_sext_z_store: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addis r4, r2, .LC0@toc@ha ; CHECK-NEXT: subfic r3, r3, 0 ; CHECK-NEXT: ld r4, .LC0@toc@l(r4) diff --git a/test/CodeGen/PowerPC/vec_add_sub_quadword.ll b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll index 
f42f7d11783..8f3864ff268 100644 --- a/test/CodeGen/PowerPC/vec_add_sub_quadword.ll +++ b/test/CodeGen/PowerPC/vec_add_sub_quadword.ll @@ -8,7 +8,7 @@ define <1 x i128> @out_of_bounds_insertelement(<1 x i128> %x, i128 %val) nounwin %result = add <1 x i128> %x, %tmpvec ret <1 x i128> %result ; CHECK-LABEL: @out_of_bounds_insertelement -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: blr } diff --git a/test/CodeGen/PowerPC/vec_extract_p9.ll b/test/CodeGen/PowerPC/vec_extract_p9.ll index b07c905ceec..7e397f54684 100644 --- a/test/CodeGen/PowerPC/vec_extract_p9.ll +++ b/test/CodeGen/PowerPC/vec_extract_p9.ll @@ -4,12 +4,12 @@ define zeroext i8 @test1(<16 x i8> %a, i32 signext %index) { ; CHECK-LE-LABEL: test1: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 56 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test1: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 56 ; CHECK-BE-NEXT: blr @@ -21,12 +21,12 @@ entry: define signext i8 @test2(<16 x i8> %a, i32 signext %index) { ; CHECK-LE-LABEL: test2: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: extsb 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test2: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: extsb 3, 3 ; CHECK-BE-NEXT: blr @@ -38,13 +38,13 @@ entry: define zeroext i16 @test3(<8 x i16> %a, i32 signext %index) { ; CHECK-LE-LABEL: test3: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 48 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test3: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 48 @@ -57,13 +57,13 @@ entry: define signext i16 @test4(<8 x i16> %a, i32 signext %index) { ; CHECK-LE-LABEL: test4: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: extsh 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test4: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: extsh 3, 3 @@ -76,12 +76,12 @@ entry: define zeroext i32 @test5(<4 x i32> %a, i32 signext %index) { ; CHECK-LE-LABEL: test5: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test5: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr @@ -93,13 +93,13 @@ entry: define signext i32 @test6(<4 x i32> %a, i32 signext %index) { ; CHECK-LE-LABEL: test6: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test6: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: extsw 3, 3 @@ -113,13 +113,13 @@ entry: ; Test with immediate index define zeroext i8 @test7(<16 x i8> %a) { ; CHECK-LE-LABEL: test7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # 
%bb.0: # %entry ; CHECK-LE-NEXT: li 3, 1 ; CHECK-LE-NEXT: vextubrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 56 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 1 ; CHECK-BE-NEXT: vextublx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 56 @@ -132,13 +132,13 @@ entry: define zeroext i16 @test8(<8 x i16> %a) { ; CHECK-LE-LABEL: test8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 2 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: clrldi 3, 3, 48 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 2 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: clrldi 3, 3, 48 @@ -151,12 +151,12 @@ entry: define zeroext i32 @test9(<4 x i32> %a) { ; CHECK-LE-LABEL: test9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 12 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 12 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr diff --git a/test/CodeGen/PowerPC/vec_extract_p9_2.ll b/test/CodeGen/PowerPC/vec_extract_p9_2.ll index 9734a88fdec..f2ce7924ed9 100644 --- a/test/CodeGen/PowerPC/vec_extract_p9_2.ll +++ b/test/CodeGen/PowerPC/vec_extract_p9_2.ll @@ -4,13 +4,13 @@ define zeroext i8 @test_add1(<16 x i8> %a, i32 signext %index, i8 zeroext %c) { ; CHECK-LE-LABEL: test_add1: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: rlwinm 3, 3, 0, 24, 31 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add1: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: add 3, 3, 6 ; CHECK-BE-NEXT: rlwinm 3, 3, 0, 24, 31 @@ -26,13 +26,13 @@ entry: define signext i8 @test_add2(<16 x i8> %a, i32 signext %index, i8 signext %c) { ; CHECK-LE-LABEL: test_add2: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextubrx 3, 5, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsb 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add2: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextublx 3, 5, 2 ; CHECK-BE-NEXT: add 3, 3, 6 ; CHECK-BE-NEXT: extsb 3, 3 @@ -48,14 +48,14 @@ entry: define zeroext i16 @test_add3(<8 x i16> %a, i32 signext %index, i16 zeroext %c) { ; CHECK-LE-LABEL: test_add3: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: rlwinm 3, 3, 0, 16, 31 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add3: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -72,14 +72,14 @@ entry: define signext i16 @test_add4(<8 x i16> %a, i32 signext %index, i16 signext %c) { ; CHECK-LE-LABEL: test_add4: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-LE-NEXT: vextuhrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsh 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add4: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 1, 28, 30 ; CHECK-BE-NEXT: vextuhlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -96,14 +96,14 @@ entry: define zeroext i32 @test_add5(<4 x i32> %a, 
i32 signext %index, i32 zeroext %c) { ; CHECK-LE-LABEL: test_add5: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: clrldi 3, 3, 32 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add5: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -117,14 +117,14 @@ entry: define signext i32 @test_add6(<4 x i32> %a, i32 signext %index, i32 signext %c) { ; CHECK-LE-LABEL: test_add6: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 6 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test_add6: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: rlwinm 3, 5, 2, 28, 29 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 6 @@ -139,11 +139,11 @@ entry: ; When extracting word element 2 on LE, it's better to use mfvsrwz rather than vextuwrx define zeroext i32 @test7(<4 x i32> %a) { ; CHECK-LE-LABEL: test7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: blr @@ -154,13 +154,13 @@ entry: define zeroext i32 @testadd_7(<4 x i32> %a, i32 zeroext %c) { ; CHECK-LE-LABEL: testadd_7: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: clrldi 3, 3, 32 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_7: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 5 @@ -174,12 +174,12 @@ entry: define signext i32 @test8(<4 x i32> %a) { ; CHECK-LE-LABEL: test8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: extsw 3, 3 @@ -191,13 +191,13 @@ entry: define signext i32 @testadd_8(<4 x i32> %a, i32 signext %c) { ; CHECK-LE-LABEL: testadd_8: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: mfvsrwz 3, 34 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_8: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: li 3, 8 ; CHECK-BE-NEXT: vextuwlx 3, 3, 2 ; CHECK-BE-NEXT: add 3, 3, 5 @@ -212,13 +212,13 @@ entry: ; When extracting word element 1 on BE, it's better to use mfvsrwz rather than vextuwlx define signext i32 @test9(<4 x i32> %a) { ; CHECK-LE-LABEL: test9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: li 3, 4 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: test9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: mfvsrwz 3, 34 ; CHECK-BE-NEXT: extsw 3, 3 ; CHECK-BE-NEXT: blr @@ -229,14 +229,14 @@ entry: define signext i32 @testadd_9(<4 x i32> %a, i32 signext %c) { ; CHECK-LE-LABEL: testadd_9: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: 
li 3, 4 ; CHECK-LE-NEXT: vextuwrx 3, 3, 2 ; CHECK-LE-NEXT: add 3, 3, 5 ; CHECK-LE-NEXT: extsw 3, 3 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: testadd_9: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: mfvsrwz 3, 34 ; CHECK-BE-NEXT: add 3, 3, 5 ; CHECK-BE-NEXT: extsw 3, 3 diff --git a/test/CodeGen/PowerPC/vec_int_ext.ll b/test/CodeGen/PowerPC/vec_int_ext.ll index d7bed503318..1c86e38d060 100644 --- a/test/CodeGen/PowerPC/vec_int_ext.ll +++ b/test/CodeGen/PowerPC/vec_int_ext.ll @@ -4,11 +4,11 @@ define <4 x i32> @vextsb2wLE(<16 x i8> %a) { ; CHECK-LE-LABEL: vextsb2wLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsb2w 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsb2wLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsb2w 2, 2 ; CHECK-BE-NEXT: blr @@ -31,11 +31,11 @@ entry: define <2 x i64> @vextsb2dLE(<16 x i8> %a) { ; CHECK-LE-LABEL: vextsb2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsb2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsb2dLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsb2d 2, 2 ; CHECK-BE-NEXT: blr @@ -52,11 +52,11 @@ entry: define <4 x i32> @vextsh2wLE(<8 x i16> %a) { ; CHECK-LE-LABEL: vextsh2wLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsh2w 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsh2wLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsh2w 2, 2 ; CHECK-BE-NEXT: blr @@ -79,11 +79,11 @@ entry: define <2 x i64> @vextsh2dLE(<8 x i16> %a) { ; CHECK-LE-LABEL: vextsh2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsh2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsh2dLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vperm 2, 2, 2, 3 ; CHECK-BE-NEXT: vextsh2d 2, 2 ; CHECK-BE-NEXT: blr @@ -100,11 +100,11 @@ entry: define <2 x i64> @vextsw2dLE(<4 x i32> %a) { ; CHECK-LE-LABEL: vextsw2dLE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vextsw2d 2, 2 ; CHECK-LE-NEXT: blr ; CHECK-BE-LABEL: vextsw2dLE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE: vmrgew ; CHECK-BE-NEXT: vextsw2d 2, 2 ; CHECK-BE-NEXT: blr @@ -121,11 +121,11 @@ entry: define <4 x i32> @vextsb2wBE(<16 x i8> %a) { ; CHECK-BE-LABEL: vextsb2wBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsb2w 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsb2wBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 13 ; CHECK-LE-NEXT: vextsb2w 2, 2 ; CHECK-LE-NEXT: blr @@ -147,11 +147,11 @@ entry: define <2 x i64> @vextsb2dBE(<16 x i8> %a) { ; CHECK-BE-LABEL: vextsb2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsb2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsb2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 9 ; CHECK-LE-NEXT: vextsb2d 2, 2 ; CHECK-LE-NEXT: blr @@ -167,11 +167,11 @@ entry: define <4 x i32> @vextsh2wBE(<8 x i16> %a) { ; CHECK-BE-LABEL: vextsh2wBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsh2w 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsh2wBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 
2, 14 ; CHECK-LE-NEXT: vextsh2w 2, 2 ; CHECK-LE-NEXT: blr @@ -193,11 +193,11 @@ entry: define <2 x i64> @vextsh2dBE(<8 x i16> %a) { ; CHECK-BE-LABEL: vextsh2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsh2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsh2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 10 ; CHECK-LE-NEXT: vextsh2d 2, 2 ; CHECK-LE-NEXT: blr @@ -213,11 +213,11 @@ entry: define <2 x i64> @vextsw2dBE(<4 x i32> %a) { ; CHECK-BE-LABEL: vextsw2dBE: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NEXT: vextsw2d 2, 2 ; CHECK-BE-NEXT: blr ; CHECK-LE-LABEL: vextsw2dBE: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NEXT: vsldoi 2, 2, 2, 12 ; CHECK-LE-NEXT: vextsw2d 2, 2 ; CHECK-LE-NEXT: blr @@ -233,11 +233,11 @@ entry: define <2 x i64> @vextDiffVectors(<4 x i32> %a, <4 x i32> %b) { ; CHECK-LE-LABEL: vextDiffVectors: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NOT: vextsw2d ; CHECK-BE-LABEL: vextDiffVectors: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NOT: vextsw2d entry: %vecext = extractelement <4 x i32> %a, i32 0 @@ -252,11 +252,11 @@ entry: define <8 x i16> @testInvalidExtend(<16 x i8> %a) { entry: ; CHECK-LE-LABEL: testInvalidExtend: -; CHECK-LE: # BB#0: # %entry +; CHECK-LE: # %bb.0: # %entry ; CHECK-LE-NOT: vexts ; CHECK-BE-LABEL: testInvalidExtend: -; CHECK-BE: # BB#0: # %entry +; CHECK-BE: # %bb.0: # %entry ; CHECK-BE-NOT: vexts %vecext = extractelement <16 x i8> %a, i32 0 diff --git a/test/CodeGen/PowerPC/vec_revb.ll b/test/CodeGen/PowerPC/vec_revb.ll index c09164bae13..00c08a1204f 100644 --- a/test/CodeGen/PowerPC/vec_revb.ll +++ b/test/CodeGen/PowerPC/vec_revb.ll @@ -3,7 +3,7 @@ define <8 x i16> @testXXBRH(<8 x i16> %a) { ; CHECK-LABEL: testXXBRH: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrh 34, 34 ; CHECK-NEXT: blr @@ -16,7 +16,7 @@ entry: define <4 x i32> @testXXBRW(<4 x i32> %a) { ; CHECK-LABEL: testXXBRW: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrw 34, 34 ; CHECK-NEXT: blr @@ -29,7 +29,7 @@ entry: define <2 x double> @testXXBRD(<2 x double> %a) { ; CHECK-LABEL: testXXBRD: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrd 34, 34 ; CHECK-NEXT: blr @@ -42,7 +42,7 @@ entry: define <1 x i128> @testXXBRQ(<1 x i128> %a) { ; CHECK-LABEL: testXXBRQ: -; CHECK: # BB#0: # %entry +; CHECK: # %bb.0: # %entry ; CHECK-NEXT: xxbrq 34, 34 ; CHECK-NEXT: blr diff --git a/test/CodeGen/PowerPC/vselect-constants.ll b/test/CodeGen/PowerPC/vselect-constants.ll index 077eb2defc0..5f23c3e40de 100644 --- a/test/CodeGen/PowerPC/vselect-constants.ll +++ b/test/CodeGen/PowerPC/vselect-constants.ll @@ -9,7 +9,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_C1_or_C2_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha @@ -29,7 +29,7 @@ define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_C1_or_C2_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI1_0@toc@ha ; CHECK-NEXT: addis 4, 2, .LCPI1_1@toc@ha @@ -46,7 +46,7 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) { ; CHECK-LABEL: 
sel_Cplus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l @@ -60,7 +60,7 @@ define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_Cplus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l @@ -74,7 +74,7 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_Cminus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha @@ -91,7 +91,7 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_Cminus1_or_C_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha ; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l @@ -105,7 +105,7 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_minus1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, -16 ; CHECK-NEXT: vspltisw 4, 15 ; CHECK-NEXT: vsubuwm 3, 4, 3 @@ -118,7 +118,7 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_minus1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: blr %cond = icmp eq <4 x i32> %x, %y @@ -128,7 +128,7 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_0_or_minus1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: vspltisb 4, -1 ; CHECK-NEXT: xxland 34, 34, 35 @@ -140,7 +140,7 @@ define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_0_or_minus1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: xxlnor 34, 34, 34 ; CHECK-NEXT: blr @@ -151,7 +151,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: xxland 34, 34, 35 ; CHECK-NEXT: blr @@ -161,7 +161,7 @@ define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_1_or_0_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vspltisw 19, 1 ; CHECK-NEXT: xxland 34, 34, 51 @@ -173,7 +173,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) { ; CHECK-LABEL: sel_0_or_1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vspltisw 3, 1 ; CHECK-NEXT: xxlandc 34, 35, 34 ; CHECK-NEXT: blr @@ -183,7 +183,7 @@ define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) { define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: cmp_sel_0_or_1_vec: -; CHECK: # BB#0: +; CHECK: # %bb.0: ; CHECK-NEXT: vcmpequw 2, 2, 3 ; CHECK-NEXT: vspltisw 19, 1 ; 
CHECK-NEXT: xxlnor 0, 34, 34 diff --git a/test/CodeGen/RISCV/addc-adde-sube-subc.ll b/test/CodeGen/RISCV/addc-adde-sube-subc.ll index 50de47d7c1f..54f5482b9e7 100644 --- a/test/CodeGen/RISCV/addc-adde-sube-subc.ll +++ b/test/CodeGen/RISCV/addc-adde-sube-subc.ll @@ -6,7 +6,7 @@ define i64 @addc_adde(i64 %a, i64 %b) { ; RV32I-LABEL: addc_adde: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a1, a1, a3 ; RV32I-NEXT: add a2, a0, a2 ; RV32I-NEXT: sltu a0, a2, a0 @@ -19,7 +19,7 @@ define i64 @addc_adde(i64 %a, i64 %b) { define i64 @subc_sube(i64 %a, i64 %b) { ; RV32I-LABEL: subc_sube: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sub a1, a1, a3 ; RV32I-NEXT: sltu a3, a0, a2 ; RV32I-NEXT: sub a1, a1, a3 diff --git a/test/CodeGen/RISCV/alu32.ll b/test/CodeGen/RISCV/alu32.ll index 9aa6058c2a0..e7c82181027 100644 --- a/test/CodeGen/RISCV/alu32.ll +++ b/test/CodeGen/RISCV/alu32.ll @@ -10,7 +10,7 @@ define i32 @addi(i32 %a) nounwind { ; RV32I-LABEL: addi: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, 1 @@ -19,7 +19,7 @@ define i32 @addi(i32 %a) nounwind { define i32 @slti(i32 %a) nounwind { ; RV32I-LABEL: slti: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slti a0, a0, 2 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp slt i32 %a, 2 @@ -29,7 +29,7 @@ define i32 @slti(i32 %a) nounwind { define i32 @sltiu(i32 %a) nounwind { ; RV32I-LABEL: sltiu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltiu a0, a0, 3 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, 3 @@ -39,7 +39,7 @@ define i32 @sltiu(i32 %a) nounwind { define i32 @xori(i32 %a) nounwind { ; RV32I-LABEL: xori: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xori a0, a0, 4 ; RV32I-NEXT: jalr zero, ra, 0 %1 = xor i32 %a, 4 @@ -48,7 +48,7 @@ define i32 @xori(i32 %a) nounwind { define i32 @ori(i32 %a) nounwind { ; RV32I-LABEL: ori: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: ori a0, a0, 5 ; RV32I-NEXT: jalr zero, ra, 0 %1 = or i32 %a, 5 @@ -57,7 +57,7 @@ define i32 @ori(i32 %a) nounwind { define i32 @andi(i32 %a) nounwind { ; RV32I-LABEL: andi: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 6 ; RV32I-NEXT: jalr zero, ra, 0 %1 = and i32 %a, 6 @@ -66,7 +66,7 @@ define i32 @andi(i32 %a) nounwind { define i32 @slli(i32 %a) nounwind { ; RV32I-LABEL: slli: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 7 ; RV32I-NEXT: jalr zero, ra, 0 %1 = shl i32 %a, 7 @@ -75,7 +75,7 @@ define i32 @slli(i32 %a) nounwind { define i32 @srli(i32 %a) nounwind { ; RV32I-LABEL: srli: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 8 ; RV32I-NEXT: jalr zero, ra, 0 %1 = lshr i32 %a, 8 @@ -84,7 +84,7 @@ define i32 @srli(i32 %a) nounwind { define i32 @srai(i32 %a) nounwind { ; RV32I-LABEL: srai: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srai a0, a0, 9 ; RV32I-NEXT: jalr zero, ra, 0 %1 = ashr i32 %a, 9 @@ -95,7 +95,7 @@ define i32 @srai(i32 %a) nounwind { define i32 @add(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: add: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, %b @@ -104,7 +104,7 @@ define i32 @add(i32 %a, i32 %b) nounwind { define i32 @sub(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sub: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = sub i32 %a, %b @@ -113,7 +113,7 @@ define i32 @sub(i32 %a, i32 %b) nounwind { define i32 @sll(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sll: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; 
RV32I-NEXT: sll a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = shl i32 %a, %b @@ -122,7 +122,7 @@ define i32 @sll(i32 %a, i32 %b) nounwind { define i32 @slt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: slt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp slt i32 %a, %b @@ -132,7 +132,7 @@ define i32 @slt(i32 %a, i32 %b) nounwind { define i32 @sltu(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sltu: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, %b @@ -142,7 +142,7 @@ define i32 @sltu(i32 %a, i32 %b) nounwind { define i32 @xor(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: xor: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = xor i32 %a, %b @@ -151,7 +151,7 @@ define i32 @xor(i32 %a, i32 %b) nounwind { define i32 @srl(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: srl: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srl a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = lshr i32 %a, %b @@ -160,7 +160,7 @@ define i32 @srl(i32 %a, i32 %b) nounwind { define i32 @sra(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: sra: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sra a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = ashr i32 %a, %b @@ -169,7 +169,7 @@ define i32 @sra(i32 %a, i32 %b) nounwind { define i32 @or(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: or: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = or i32 %a, %b @@ -178,7 +178,7 @@ define i32 @or(i32 %a, i32 %b) nounwind { define i32 @and(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: and: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = and i32 %a, %b diff --git a/test/CodeGen/RISCV/bare-select.ll b/test/CodeGen/RISCV/bare-select.ll index ec98b6d18b2..a46afe27143 100644 --- a/test/CodeGen/RISCV/bare-select.ll +++ b/test/CodeGen/RISCV/bare-select.ll @@ -4,10 +4,10 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) { ; RV32I-LABEL: bare_select: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: bne a0, zero, .LBB0_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: addi a1, a2, 0 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: addi a0, a1, 0 diff --git a/test/CodeGen/RISCV/blockaddress.ll b/test/CodeGen/RISCV/blockaddress.ll index f51598ff5a7..9eb4e3d404d 100644 --- a/test/CodeGen/RISCV/blockaddress.ll +++ b/test/CodeGen/RISCV/blockaddress.ll @@ -6,7 +6,7 @@ define void @test_blockaddress() nounwind { ; RV32I-LABEL: test_blockaddress: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 0(s0) ; RV32I-NEXT: lui a0, %hi(addr) ; RV32I-NEXT: addi a0, a0, %lo(addr) diff --git a/test/CodeGen/RISCV/branch.ll b/test/CodeGen/RISCV/branch.ll index 194083b07c7..e2593d3309b 100644 --- a/test/CodeGen/RISCV/branch.ll +++ b/test/CodeGen/RISCV/branch.ll @@ -4,7 +4,7 @@ define void @foo(i32 %a, i32 *%b, i1 %c) { ; RV32I-LABEL: foo: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: beq a3, a0, .LBB0_12 ; RV32I-NEXT: jal zero, .LBB0_1 diff --git a/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll index 6521f66cf6a..150dfed3573 100644 --- a/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ b/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -14,7 +14,7 @@ declare i32 @llvm.ctpop.i32(i32) define i16 @test_bswap_i16(i16 %a) nounwind { ; RV32I-LABEL: test_bswap_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; 
RV32I-NEXT: lui a1, 4080 ; RV32I-NEXT: addi a1, a1, 0 ; RV32I-NEXT: slli a2, a0, 8 @@ -29,7 +29,7 @@ define i16 @test_bswap_i16(i16 %a) nounwind { define i32 @test_bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: test_bswap_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -256 ; RV32I-NEXT: srli a2, a0, 8 @@ -50,7 +50,7 @@ define i32 @test_bswap_i32(i32 %a) nounwind { define i64 @test_bswap_i64(i64 %a) nounwind { ; RV32I-LABEL: test_bswap_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a3, a2, -256 ; RV32I-NEXT: srli a2, a1, 8 @@ -81,7 +81,7 @@ define i64 @test_bswap_i64(i64 %a) nounwind { define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 8 @@ -123,7 +123,7 @@ define i8 @test_cttz_i8(i8 %a) nounwind { define i16 @test_cttz_i16(i16 %a) nounwind { ; RV32I-LABEL: test_cttz_i16: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 16 @@ -167,7 +167,7 @@ define i16 @test_cttz_i16(i16 %a) nounwind { define i32 @test_cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_cttz_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 32 @@ -208,7 +208,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind { define i32 @test_ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_ctlz_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, 0 ; RV32I-NEXT: addi a0, zero, 32 @@ -257,7 +257,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-LABEL: test_cttz_i64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 28(s0) ; RV32I-NEXT: sw s1, 24(s0) ; RV32I-NEXT: sw s2, 20(s0) @@ -311,7 +311,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-NEXT: addi a1, s3, 0 ; RV32I-NEXT: jalr ra, s6, 0 ; RV32I-NEXT: bne s2, zero, .LBB7_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB7_2: @@ -332,7 +332,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind { define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { ; RV32I-LABEL: test_cttz_i8_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -367,7 +367,7 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { ; RV32I-LABEL: test_cttz_i16_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -402,7 +402,7 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; RV32I-LABEL: test_cttz_i32_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: xori a0, a0, -1 @@ -437,7 +437,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-LABEL: test_cttz_i64_zero_undef: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 28(s0) ; RV32I-NEXT: sw s1, 24(s0) ; RV32I-NEXT: sw s2, 20(s0) @@ -491,7 +491,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-NEXT: addi a1, s3, 0 ; RV32I-NEXT: jalr ra, s6, 0 ; RV32I-NEXT: bne s2, zero, 
.LBB11_2 -; RV32I-NEXT: # BB#1: +; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB11_2: @@ -512,7 +512,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { define i32 @test_ctpop_i32(i32 %a) nounwind { ; RV32I-LABEL: test_ctpop_i32: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 diff --git a/test/CodeGen/RISCV/calls.ll b/test/CodeGen/RISCV/calls.ll index 8abe5e92a8e..77f61290705 100644 --- a/test/CodeGen/RISCV/calls.ll +++ b/test/CodeGen/RISCV/calls.ll @@ -6,7 +6,7 @@ declare i32 @external_function(i32) define i32 @test_call_external(i32 %a) nounwind { ; RV32I-LABEL: test_call_external: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(external_function) ; RV32I-NEXT: addi a1, a1, %lo(external_function) @@ -19,7 +19,7 @@ define i32 @test_call_external(i32 %a) nounwind { define i32 @defined_function(i32 %a) nounwind { ; RV32I-LABEL: defined_function: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, 1 @@ -28,7 +28,7 @@ define i32 @defined_function(i32 %a) nounwind { define i32 @test_call_defined(i32 %a) nounwind { ; RV32I-LABEL: test_call_defined: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(defined_function) ; RV32I-NEXT: addi a1, a1, %lo(defined_function) @@ -41,7 +41,7 @@ define i32 @test_call_defined(i32 %a) nounwind { define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind { ; RV32I-LABEL: test_call_indirect: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: addi a2, a0, 0 ; RV32I-NEXT: addi a0, a1, 0 @@ -57,7 +57,7 @@ define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind { define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: fastcc_function: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = add i32 %a, %b @@ -66,7 +66,7 @@ define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind { define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: test_call_fastcc: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: sw s1, 8(s0) ; RV32I-NEXT: addi s1, a0, 0 diff --git a/test/CodeGen/RISCV/div.ll b/test/CodeGen/RISCV/div.ll index 4c0f5de0358..a53c51c94d8 100644 --- a/test/CodeGen/RISCV/div.ll +++ b/test/CodeGen/RISCV/div.ll @@ -4,7 +4,7 @@ define i32 @udiv(i32 %a, i32 %b) { ; RV32I-LABEL: udiv: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a2, %lo(__udivsi3) @@ -17,7 +17,7 @@ define i32 @udiv(i32 %a, i32 %b) { define i32 @udiv_constant(i32 %a) { ; RV32I-LABEL: udiv_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a1, %lo(__udivsi3) @@ -31,7 +31,7 @@ define i32 @udiv_constant(i32 %a) { define i32 @udiv_pow2(i32 %a) { ; RV32I-LABEL: udiv_pow2: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srli a0, a0, 3 ; RV32I-NEXT: jalr zero, ra, 0 %1 = udiv i32 %a, 8 @@ -40,7 +40,7 @@ define i32 @udiv_pow2(i32 %a) { define i64 @udiv64(i64 %a, i64 %b) { ; RV32I-LABEL: udiv64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a4, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a4, %lo(__udivdi3) @@ -53,7 +53,7 @@ define i64 @udiv64(i64 %a, i64 %b) { define i64 
@udiv64_constant(i64 %a) { ; RV32I-LABEL: udiv64_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a2, %lo(__udivdi3) @@ -68,7 +68,7 @@ define i64 @udiv64_constant(i64 %a) { define i32 @sdiv(i32 %a, i32 %b) { ; RV32I-LABEL: sdiv: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__divsi3) ; RV32I-NEXT: addi a2, a2, %lo(__divsi3) @@ -81,7 +81,7 @@ define i32 @sdiv(i32 %a, i32 %b) { define i32 @sdiv_constant(i32 %a) { ; RV32I-LABEL: sdiv_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a1, %hi(__divsi3) ; RV32I-NEXT: addi a2, a1, %lo(__divsi3) @@ -95,7 +95,7 @@ define i32 @sdiv_constant(i32 %a) { define i32 @sdiv_pow2(i32 %a) { ; RV32I-LABEL: sdiv_pow2: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: srai a1, a0, 31 ; RV32I-NEXT: srli a1, a1, 29 ; RV32I-NEXT: add a0, a0, a1 @@ -107,7 +107,7 @@ define i32 @sdiv_pow2(i32 %a) { define i64 @sdiv64(i64 %a, i64 %b) { ; RV32I-LABEL: sdiv64: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a4, %hi(__divdi3) ; RV32I-NEXT: addi a4, a4, %lo(__divdi3) @@ -120,7 +120,7 @@ define i64 @sdiv64(i64 %a, i64 %b) { define i64 @sdiv64_constant(i64 %a) { ; RV32I-LABEL: sdiv64_constant: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sw ra, 12(s0) ; RV32I-NEXT: lui a2, %hi(__divdi3) ; RV32I-NEXT: addi a4, a2, %lo(__divdi3) diff --git a/test/CodeGen/RISCV/i32-icmp.ll b/test/CodeGen/RISCV/i32-icmp.ll index 4d86ced2584..bc06ec805e9 100644 --- a/test/CodeGen/RISCV/i32-icmp.ll +++ b/test/CodeGen/RISCV/i32-icmp.ll @@ -7,7 +7,7 @@ define i32 @icmp_eq(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_eq: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sltiu a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -18,7 +18,7 @@ define i32 @icmp_eq(i32 %a, i32 %b) nounwind { define i32 @icmp_ne(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ne: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 ; RV32I-NEXT: sltu a0, zero, a0 ; RV32I-NEXT: jalr zero, ra, 0 @@ -29,7 +29,7 @@ define i32 @icmp_ne(i32 %a, i32 %b) nounwind { define i32 @icmp_ugt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ugt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ugt i32 %a, %b @@ -39,7 +39,7 @@ define i32 @icmp_ugt(i32 %a, i32 %b) nounwind { define i32 @icmp_uge(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_uge: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -50,7 +50,7 @@ define i32 @icmp_uge(i32 %a, i32 %b) nounwind { define i32 @icmp_ult(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ult: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp ult i32 %a, %b @@ -60,7 +60,7 @@ define i32 @icmp_ult(i32 %a, i32 %b) nounwind { define i32 @icmp_ule(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_ule: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 ; RV32I-NEXT: xori a0, a0, 1 ; RV32I-NEXT: jalr zero, ra, 0 @@ -71,7 +71,7 @@ define i32 @icmp_ule(i32 %a, i32 %b) nounwind { define i32 @icmp_sgt(i32 %a, i32 %b) nounwind { ; RV32I-LABEL: icmp_sgt: -; RV32I: # BB#0: +; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 ; RV32I-NEXT: jalr zero, ra, 0 %1 = icmp sgt i32 %a, %b @@ -81,7 +81,7 @@ define i32 @icmp_sgt(i32 %a, i32 %b) nounwind { define i32 @icmp_sge(i32 
%a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sge:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slt a0, a0, a1
 ; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -92,7 +92,7 @@ define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
 define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_slt:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slt a0, a0, a1
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = icmp slt i32 %a, %b
@@ -102,7 +102,7 @@ define i32 @icmp_slt(i32 %a, i32 %b) nounwind {
 define i32 @icmp_sle(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sle:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slt a0, a1, a0
 ; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/imm.ll b/test/CodeGen/RISCV/imm.ll
index c52638da02e..ddefa22835a 100644
--- a/test/CodeGen/RISCV/imm.ll
+++ b/test/CodeGen/RISCV/imm.ll
@@ -6,7 +6,7 @@
 define i32 @zero() nounwind {
 ; RV32I-LABEL: zero:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a0, zero, 0
 ; RV32I-NEXT: jalr zero, ra, 0
 ret i32 0
@@ -14,7 +14,7 @@ define i32 @zero() nounwind {
 define i32 @pos_small() nounwind {
 ; RV32I-LABEL: pos_small:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a0, zero, 2047
 ; RV32I-NEXT: jalr zero, ra, 0
 ret i32 2047
@@ -22,7 +22,7 @@ define i32 @pos_small() nounwind {
 define i32 @neg_small() nounwind {
 ; RV32I-LABEL: neg_small:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a0, zero, -2048
 ; RV32I-NEXT: jalr zero, ra, 0
 ret i32 -2048
@@ -30,7 +30,7 @@ define i32 @neg_small() nounwind {
 define i32 @pos_i32() nounwind {
 ; RV32I-LABEL: pos_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a0, 423811
 ; RV32I-NEXT: addi a0, a0, -1297
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -39,7 +39,7 @@ define i32 @pos_i32() nounwind {
 define i32 @neg_i32() nounwind {
 ; RV32I-LABEL: neg_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a0, 912092
 ; RV32I-NEXT: addi a0, a0, -273
 ; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/indirectbr.ll b/test/CodeGen/RISCV/indirectbr.ll
index 0a51e3d0b2e..40641da6d6f 100644
--- a/test/CodeGen/RISCV/indirectbr.ll
+++ b/test/CodeGen/RISCV/indirectbr.ll
@@ -4,7 +4,7 @@
 define i32 @indirectbr(i8* %target) nounwind {
 ; RV32I-LABEL: indirectbr:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 0(s0)
 ; RV32I-NEXT: jalr zero, a0, 0
 ; RV32I-NEXT: .LBB0_1: # %ret
@@ -20,7 +20,7 @@ ret:
 define i32 @indirectbr_with_offset(i8* %a) nounwind {
 ; RV32I-LABEL: indirectbr_with_offset:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 0(s0)
 ; RV32I-NEXT: jalr zero, a0, 1380
 ; RV32I-NEXT: .LBB1_1: # %ret
diff --git a/test/CodeGen/RISCV/jumptable.ll b/test/CodeGen/RISCV/jumptable.ll
index 98144c7c1e6..68f4f1cb721 100644
--- a/test/CodeGen/RISCV/jumptable.ll
+++ b/test/CodeGen/RISCV/jumptable.ll
@@ -4,7 +4,7 @@
 define void @jt(i32 %in, i32* %out) {
 ; RV32I-LABEL: jt:
-; RV32I: # BB#0: # %entry
+; RV32I: # %bb.0: # %entry
 ; RV32I-NEXT: addi a2, zero, 2
 ; RV32I-NEXT: blt a2, a0, .LBB0_3
 ; RV32I-NEXT: jal zero, .LBB0_1
diff --git a/test/CodeGen/RISCV/mem.ll b/test/CodeGen/RISCV/mem.ll
index b06382f8742..6446034e542 100644
--- a/test/CodeGen/RISCV/mem.ll
+++ b/test/CodeGen/RISCV/mem.ll
@@ -6,7 +6,7 @@
 define i32 @lb(i8 *%a) nounwind {
 ; RV32I-LABEL: lb:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lb a1, 0(a0)
 ; RV32I-NEXT: lb a0, 1(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -20,7 +20,7 @@ define i32 @lb(i8 *%a) nounwind {
 define i32 @lh(i16 *%a) nounwind {
 ; RV32I-LABEL: lh:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lh a1, 0(a0)
 ; RV32I-NEXT: lh a0, 4(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @lh(i16 *%a) nounwind {
 define i32 @lw(i32 *%a) nounwind {
 ; RV32I-LABEL: lw:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lw a1, 0(a0)
 ; RV32I-NEXT: lw a0, 12(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -46,7 +46,7 @@ define i32 @lw(i32 *%a) nounwind {
 define i32 @lbu(i8 *%a) nounwind {
 ; RV32I-LABEL: lbu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lbu a1, 0(a0)
 ; RV32I-NEXT: lbu a0, 4(a0)
 ; RV32I-NEXT: add a0, a0, a1
@@ -62,7 +62,7 @@ define i32 @lbu(i8 *%a) nounwind {
 define i32 @lhu(i16 *%a) nounwind {
 ; RV32I-LABEL: lhu:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lhu a1, 0(a0)
 ; RV32I-NEXT: lhu a0, 10(a0)
 ; RV32I-NEXT: add a0, a0, a1
@@ -80,7 +80,7 @@ define i32 @lhu(i16 *%a) nounwind {
 define void @sb(i8 *%a, i8 %b) nounwind {
 ; RV32I-LABEL: sb:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sb a1, 6(a0)
 ; RV32I-NEXT: sb a1, 0(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -92,7 +92,7 @@ define void @sb(i8 *%a, i8 %b) nounwind {
 define void @sh(i16 *%a, i16 %b) nounwind {
 ; RV32I-LABEL: sh:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sh a1, 14(a0)
 ; RV32I-NEXT: sh a1, 0(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -104,7 +104,7 @@ define void @sh(i16 *%a, i16 %b) nounwind {
 define void @sw(i32 *%a, i32 %b) nounwind {
 ; RV32I-LABEL: sw:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw a1, 32(a0)
 ; RV32I-NEXT: sw a1, 0(a0)
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -117,7 +117,7 @@ define void @sw(i32 *%a, i32 %b) nounwind {
 ; Check load and store to an i1 location
 define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lb a1, 0(a0)
 ; RV32I-NEXT: lbu a1, 1(a0)
 ; RV32I-NEXT: lbu a0, 2(a0)
@@ -139,7 +139,7 @@ define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
 define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 ; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lb a1, 0(a0)
 ; RV32I-NEXT: lbu a1, 1(a0)
 ; RV32I-NEXT: lbu a0, 2(a0)
@@ -165,7 +165,7 @@ define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
 define i32 @lw_sw_global(i32 %a) nounwind {
 ; TODO: the addi should be folded in to the lw/sw operations
 ; RV32I-LABEL: lw_sw_global:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a1, %hi(G)
 ; RV32I-NEXT: addi a2, a1, %lo(G)
 ; RV32I-NEXT: lw a1, 0(a2)
@@ -188,7 +188,7 @@ define i32 @lw_sw_global(i32 %a) nounwind {
 define i32 @lw_sw_constant(i32 %a) nounwind {
 ; TODO: the addi should be folded in to the lw/sw
 ; RV32I-LABEL: lw_sw_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a1, 912092
 ; RV32I-NEXT: addi a2, a1, -273
 ; RV32I-NEXT: lw a1, 0(a2)
diff --git a/test/CodeGen/RISCV/mul.ll b/test/CodeGen/RISCV/mul.ll
index 41653256deb..2eb5db79d1b 100644
--- a/test/CodeGen/RISCV/mul.ll
+++ b/test/CodeGen/RISCV/mul.ll
@@ -4,7 +4,7 @@
 define i32 @square(i32 %a) {
 ; RV32I-LABEL: square:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a1, %hi(__mulsi3)
 ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
@@ -18,7 +18,7 @@ define i32 @square(i32 %a) {
 define i32 @mul(i32 %a, i32 %b) {
 ; RV32I-LABEL: mul:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a2, %hi(__mulsi3)
 ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3)
@@ -31,7 +31,7 @@ define i32 @mul(i32 %a, i32 %b) {
 define i32 @mul_constant(i32 %a) {
 ; RV32I-LABEL: mul_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a1, %hi(__mulsi3)
 ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3)
@@ -45,7 +45,7 @@ define i32 @mul_constant(i32 %a) {
 define i32 @mul_pow2(i32 %a) {
 ; RV32I-LABEL: mul_pow2:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a0, a0, 3
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = mul i32 %a, 8
@@ -54,7 +54,7 @@ define i32 @mul_pow2(i32 %a) {
 define i64 @mul64(i64 %a, i64 %b) {
 ; RV32I-LABEL: mul64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a4, %hi(__muldi3)
 ; RV32I-NEXT: addi a4, a4, %lo(__muldi3)
@@ -67,7 +67,7 @@ define i64 @mul64(i64 %a, i64 %b) {
 define i64 @mul64_constant(i64 %a) {
 ; RV32I-LABEL: mul64_constant:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a2, %hi(__muldi3)
 ; RV32I-NEXT: addi a4, a2, %lo(__muldi3)
diff --git a/test/CodeGen/RISCV/rem.ll b/test/CodeGen/RISCV/rem.ll
index 80f79817b74..c9e2a90521d 100644
--- a/test/CodeGen/RISCV/rem.ll
+++ b/test/CodeGen/RISCV/rem.ll
@@ -4,7 +4,7 @@
 define i32 @urem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: urem:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a2, %hi(__umodsi3)
 ; RV32I-NEXT: addi a2, a2, %lo(__umodsi3)
@@ -17,7 +17,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
 define i32 @srem(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: srem:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a2, %hi(__modsi3)
 ; RV32I-NEXT: addi a2, a2, %lo(__modsi3)
diff --git a/test/CodeGen/RISCV/rotl-rotr.ll b/test/CodeGen/RISCV/rotl-rotr.ll
index bf0689feafa..b2331051fcd 100644
--- a/test/CodeGen/RISCV/rotl-rotr.ll
+++ b/test/CodeGen/RISCV/rotl-rotr.ll
@@ -7,7 +7,7 @@
 define i32 @rotl(i32 %x, i32 %y) {
 ; RV32I-LABEL: rotl:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a2, zero, 32
 ; RV32I-NEXT: sub a2, a2, a1
 ; RV32I-NEXT: sll a1, a0, a1
@@ -23,7 +23,7 @@ define i32 @rotl(i32 %x, i32 %y) {
 define i32 @rotr(i32 %x, i32 %y) {
 ; RV32I-LABEL: rotr:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a2, zero, 32
 ; RV32I-NEXT: sub a2, a2, a1
 ; RV32I-NEXT: srl a1, a0, a1
diff --git a/test/CodeGen/RISCV/select-cc.ll b/test/CodeGen/RISCV/select-cc.ll
index c1a570c5c98..ddc5983525e 100644
--- a/test/CodeGen/RISCV/select-cc.ll
+++ b/test/CodeGen/RISCV/select-cc.ll
@@ -4,55 +4,55 @@
 define i32 @foo(i32 %a, i32 *%b) {
 ; RV32I-LABEL: foo:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: beq a0, a2, .LBB0_2
-; RV32I-NEXT: # BB#1:
+; RV32I-NEXT: # %bb.1:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_2:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bne a0, a2, .LBB0_4
-; RV32I-NEXT: # BB#3:
+; RV32I-NEXT: # %bb.3:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_4:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bltu a2, a0, .LBB0_6
-; RV32I-NEXT: # BB#5:
+; RV32I-NEXT: # %bb.5:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_6:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bgeu a0, a2, .LBB0_8
-; RV32I-NEXT: # BB#7:
+; RV32I-NEXT: # %bb.7:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_8:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bltu a0, a2, .LBB0_10
-; RV32I-NEXT: # BB#9:
+; RV32I-NEXT: # %bb.9:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_10:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bgeu a2, a0, .LBB0_12
-; RV32I-NEXT: # BB#11:
+; RV32I-NEXT: # %bb.11:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_12:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: blt a2, a0, .LBB0_14
-; RV32I-NEXT: # BB#13:
+; RV32I-NEXT: # %bb.13:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_14:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: bge a0, a2, .LBB0_16
-; RV32I-NEXT: # BB#15:
+; RV32I-NEXT: # %bb.15:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_16:
 ; RV32I-NEXT: lw a2, 0(a1)
 ; RV32I-NEXT: blt a0, a2, .LBB0_18
-; RV32I-NEXT: # BB#17:
+; RV32I-NEXT: # %bb.17:
 ; RV32I-NEXT: addi a0, a2, 0
 ; RV32I-NEXT: .LBB0_18:
 ; RV32I-NEXT: lw a1, 0(a1)
 ; RV32I-NEXT: bge a1, a0, .LBB0_20
-; RV32I-NEXT: # BB#19:
+; RV32I-NEXT: # %bb.19:
 ; RV32I-NEXT: addi a0, a1, 0
 ; RV32I-NEXT: .LBB0_20:
 ; RV32I-NEXT: jalr zero, ra, 0
diff --git a/test/CodeGen/RISCV/sext-zext-trunc.ll b/test/CodeGen/RISCV/sext-zext-trunc.ll
index 7c5f1205b76..80bd2d2b204 100644
--- a/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -4,7 +4,7 @@
 define i8 @sext_i1_to_i8(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: sub a0, zero, a0
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -14,7 +14,7 @@ define i8 @sext_i1_to_i8(i1 %a) {
 define i16 @sext_i1_to_i16(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: sub a0, zero, a0
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -24,7 +24,7 @@ define i16 @sext_i1_to_i16(i1 %a) {
 define i32 @sext_i1_to_i32(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: sub a0, zero, a0
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -34,7 +34,7 @@ define i32 @sext_i1_to_i32(i1 %a) {
 define i64 @sext_i1_to_i64(i1 %a) {
 ; RV32I-LABEL: sext_i1_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: sub a0, zero, a0
 ; RV32I-NEXT: addi a1, a0, 0
@@ -45,7 +45,7 @@ define i64 @sext_i1_to_i64(i1 %a) {
 define i16 @sext_i8_to_i16(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a0, a0, 24
 ; RV32I-NEXT: srai a0, a0, 24
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -55,7 +55,7 @@ define i16 @sext_i8_to_i16(i8 %a) {
 define i32 @sext_i8_to_i32(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a0, a0, 24
 ; RV32I-NEXT: srai a0, a0, 24
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -65,7 +65,7 @@ define i32 @sext_i8_to_i32(i8 %a) {
 define i64 @sext_i8_to_i64(i8 %a) {
 ; RV32I-LABEL: sext_i8_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a1, a0, 24
 ; RV32I-NEXT: srai a0, a1, 24
 ; RV32I-NEXT: srai a1, a1, 31
@@ -76,7 +76,7 @@ define i64 @sext_i8_to_i64(i8 %a) {
 define i32 @sext_i16_to_i32(i16 %a) {
 ; RV32I-LABEL: sext_i16_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a0, a0, 16
 ; RV32I-NEXT: srai a0, a0, 16
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -86,7 +86,7 @@ define i32 @sext_i16_to_i32(i16 %a) {
 define i64 @sext_i16_to_i64(i16 %a) {
 ; RV32I-LABEL: sext_i16_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: slli a1, a0, 16
 ; RV32I-NEXT: srai a0, a1, 16
 ; RV32I-NEXT: srai a1, a1, 31
@@ -97,7 +97,7 @@ define i64 @sext_i16_to_i64(i16 %a) {
 define i64 @sext_i32_to_i64(i32 %a) {
 ; RV32I-LABEL: sext_i32_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: srai a1, a0, 31
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = sext i32 %a to i64
@@ -106,7 +106,7 @@ define i64 @sext_i32_to_i64(i32 %a) {
 define i8 @zext_i1_to_i8(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i1 %a to i8
@@ -115,7 +115,7 @@ define i8 @zext_i1_to_i8(i1 %a) {
 define i16 @zext_i1_to_i16(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i1 %a to i16
@@ -124,7 +124,7 @@ define i16 @zext_i1_to_i16(i1 %a) {
 define i32 @zext_i1_to_i32(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i1 %a to i32
@@ -133,7 +133,7 @@ define i32 @zext_i1_to_i32(i1 %a) {
 define i64 @zext_i1_to_i64(i1 %a) {
 ; RV32I-LABEL: zext_i1_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 1
 ; RV32I-NEXT: addi a1, zero, 0
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -143,7 +143,7 @@ define i64 @zext_i1_to_i64(i1 %a) {
 define i16 @zext_i8_to_i16(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i8 %a to i16
@@ -152,7 +152,7 @@ define i16 @zext_i8_to_i16(i8 %a) {
 define i32 @zext_i8_to_i32(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i8 %a to i32
@@ -161,7 +161,7 @@ define i32 @zext_i8_to_i32(i8 %a) {
 define i64 @zext_i8_to_i64(i8 %a) {
 ; RV32I-LABEL: zext_i8_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: andi a0, a0, 255
 ; RV32I-NEXT: addi a1, zero, 0
 ; RV32I-NEXT: jalr zero, ra, 0
@@ -171,7 +171,7 @@ define i64 @zext_i8_to_i64(i8 %a) {
 define i32 @zext_i16_to_i32(i16 %a) {
 ; RV32I-LABEL: zext_i16_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a1, 16
 ; RV32I-NEXT: addi a1, a1, -1
 ; RV32I-NEXT: and a0, a0, a1
@@ -182,7 +182,7 @@ define i32 @zext_i16_to_i32(i16 %a) {
 define i64 @zext_i16_to_i64(i16 %a) {
 ; RV32I-LABEL: zext_i16_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a1, 16
 ; RV32I-NEXT: addi a1, a1, -1
 ; RV32I-NEXT: and a0, a0, a1
@@ -194,7 +194,7 @@ define i64 @zext_i16_to_i64(i16 %a) {
 define i64 @zext_i32_to_i64(i32 %a) {
 ; RV32I-LABEL: zext_i32_to_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: addi a1, zero, 0
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = zext i32 %a to i64
@@ -206,7 +206,7 @@ define i64 @zext_i32_to_i64(i32 %a) {
 define i1 @trunc_i8_to_i1(i8 %a) {
 ; RV32I-LABEL: trunc_i8_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i8 %a to i1
 ret i1 %1
@@ -214,7 +214,7 @@ define i1 @trunc_i8_to_i1(i8 %a) {
 define i1 @trunc_i16_to_i1(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i16 %a to i1
 ret i1 %1
@@ -222,7 +222,7 @@ define i1 @trunc_i16_to_i1(i16 %a) {
 define i1 @trunc_i32_to_i1(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i32 %a to i1
 ret i1 %1
@@ -230,7 +230,7 @@ define i1 @trunc_i32_to_i1(i32 %a) {
 define i1 @trunc_i64_to_i1(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i1:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i64 %a to i1
 ret i1 %1
@@ -238,7 +238,7 @@ define i1 @trunc_i64_to_i1(i64 %a) {
 define i8 @trunc_i16_to_i8(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i16 %a to i8
 ret i8 %1
@@ -246,7 +246,7 @@ define i8 @trunc_i16_to_i8(i16 %a) {
 define i8 @trunc_i32_to_i8(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i32 %a to i8
 ret i8 %1
@@ -254,7 +254,7 @@ define i8 @trunc_i32_to_i8(i32 %a) {
 define i8 @trunc_i64_to_i8(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i8:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i64 %a to i8
 ret i8 %1
@@ -262,7 +262,7 @@ define i8 @trunc_i64_to_i8(i64 %a) {
 define i16 @trunc_i32_to_i16(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i32 %a to i16
 ret i16 %1
@@ -270,7 +270,7 @@ define i16 @trunc_i32_to_i16(i32 %a) {
 define i16 @trunc_i64_to_i16(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i16:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i64 %a to i16
 ret i16 %1
@@ -278,7 +278,7 @@ define i16 @trunc_i64_to_i16(i64 %a) {
 define i32 @trunc_i64_to_i32(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i32:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: jalr zero, ra, 0
 %1 = trunc i64 %a to i32
 ret i32 %1
diff --git a/test/CodeGen/RISCV/shifts.ll b/test/CodeGen/RISCV/shifts.ll
index d773a6ad62a..c4033c574ef 100644
--- a/test/CodeGen/RISCV/shifts.ll
+++ b/test/CodeGen/RISCV/shifts.ll
@@ -7,7 +7,7 @@
 define i64 @lshr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: lshr64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a3, %hi(__lshrdi3)
 ; RV32I-NEXT: addi a3, a3, %lo(__lshrdi3)
@@ -20,7 +20,7 @@ define i64 @lshr64(i64 %a, i64 %b) nounwind {
 define i64 @ashr64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: ashr64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a3, %hi(__ashrdi3)
 ; RV32I-NEXT: addi a3, a3, %lo(__ashrdi3)
@@ -33,7 +33,7 @@ define i64 @ashr64(i64 %a, i64 %b) nounwind {
 define i64 @shl64(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: shl64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: sw ra, 12(s0)
 ; RV32I-NEXT: lui a3, %hi(__ashldi3)
 ; RV32I-NEXT: addi a3, a3, %lo(__ashldi3)
diff --git a/test/CodeGen/RISCV/wide-mem.ll b/test/CodeGen/RISCV/wide-mem.ll
index 18ab52aaf13..cbb89f631a5 100644
--- a/test/CodeGen/RISCV/wide-mem.ll
+++ b/test/CodeGen/RISCV/wide-mem.ll
@@ -6,7 +6,7 @@
 define i64 @load_i64(i64 *%a) nounwind {
 ; RV32I-LABEL: load_i64:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lw a2, 0(a0)
 ; RV32I-NEXT: lw a1, 4(a0)
 ; RV32I-NEXT: addi a0, a2, 0
@@ -21,7 +21,7 @@ define i64 @load_i64(i64 *%a) nounwind {
 ; generate two addi
 define i64 @load_i64_global() nounwind {
 ; RV32I-LABEL: load_i64_global:
-; RV32I: # BB#0:
+; RV32I: # %bb.0:
 ; RV32I-NEXT: lui a0, %hi(val64)
 ; RV32I-NEXT: addi a0, a0, %lo(val64)
 ; RV32I-NEXT: lw a0, 0(a0)
diff --git a/test/CodeGen/SPARC/analyze-branch.ll b/test/CodeGen/SPARC/analyze-branch.ll
index 7d2096033a0..c39dde5a2b8 100644
--- a/test/CodeGen/SPARC/analyze-branch.ll
+++ b/test/CodeGen/SPARC/analyze-branch.ll
@@ -18,7 +18,7 @@ define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
 ; CHECK: cmp {{%[goli][0-9]+}}, 42
 ; CHECK: bne [[FALSE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
 ; CHECK-NEXT: call test_true
 ; CHECK: [[FALSE]]:
@@ -42,7 +42,7 @@ define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind {
 ; CHECK: be [[TRUE:.LBB[0-9]+_[0-9]+]]
 ; CHECK-NEXT: nop
-; CHECK-NEXT: ! BB#
+; CHECK-NEXT: ! %bb.
 ; CHECK-NEXT: call test_false
 ; CHECK: [[TRUE]]:
diff --git a/test/CodeGen/SPARC/vector-extract-elt.ll b/test/CodeGen/SPARC/vector-extract-elt.ll
index 702f063bfcc..47f39d5b9fb 100644
--- a/test/CodeGen/SPARC/vector-extract-elt.ll
+++ b/test/CodeGen/SPARC/vector-extract-elt.ll
@@ -5,7 +5,7 @@
 ; look-thru for extractelement then we we know that the add will yield a
 ; non-negative result.
 define i1 @test1(<4 x i16>* %in) {
-; CHECK-LABEL: ! BB#0:
+; CHECK-LABEL: ! %bb.0:
 ; CHECK-NEXT: retl
 ; CHECK-NEXT: sethi 0, %o0
 %vec2 = load <4 x i16>, <4 x i16>* %in, align 1
diff --git a/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll b/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
index 8c31f073276..a42f625a536 100644
--- a/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
+++ b/test/CodeGen/SystemZ/DAGCombiner_isAlias.ll
@@ -9,7 +9,7 @@
 ; store i1 true, i1* %g_717.sink.i, align 4
 ; %.b = load i1, i1* @g_2, align 4
-; CHECK: # BB#6: # %crc32_gentab.exit
+; CHECK: # %bb.6: # %crc32_gentab.exit
 ; CHECK: larl %r2, g_2
 ; CHECK-NEXT: llc %r3, 0(%r2)
 ; CHECK-NOT: %r2
diff --git a/test/CodeGen/SystemZ/dag-combine-02.ll b/test/CodeGen/SystemZ/dag-combine-02.ll
index b20133facb8..2d96aafb938 100644
--- a/test/CodeGen/SystemZ/dag-combine-02.ll
+++ b/test/CodeGen/SystemZ/dag-combine-02.ll
@@ -93,7 +93,7 @@ define signext i32 @main(i32 signext, i8** nocapture readonly) local_unnamed_add
 br i1 %60, label %61, label %13
 ;