2017-10-11 00:33:29 +02:00
|
|
|
//===- SelectionDAGDumper.cpp - Implement SelectionDAG::dump() ------------===//
|
2012-03-13 06:47:27 +01:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2012-03-13 06:47:27 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This implements the SelectionDAG::dump method and friends.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/ADT/APFloat.h"
|
|
|
|
#include "llvm/ADT/APInt.h"
|
|
|
|
#include "llvm/ADT/None.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/CodeGen/ISDOpcodes.h"
|
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2012-03-13 06:47:27 +01:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2017-06-06 13:49:48 +02:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2017-11-08 02:01:31 +01:00
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2017-11-17 02:07:10 +01:00
|
|
|
#include "llvm/CodeGen/TargetLowering.h"
|
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2018-03-29 19:21:10 +02:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2018-04-30 16:59:11 +02:00
|
|
|
#include "llvm/Config/llvm-config.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
|
|
|
#include "llvm/IR/DebugInfoMetadata.h"
|
|
|
|
#include "llvm/IR/DebugLoc.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2018-03-14 22:52:13 +01:00
|
|
|
#include "llvm/IR/ModuleSlotTracker.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/IR/Value.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Compiler.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2018-03-24 00:58:25 +01:00
|
|
|
#include "llvm/Support/MachineValueType.h"
|
2015-12-04 02:31:59 +01:00
|
|
|
#include "llvm/Support/Printable.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2012-03-13 06:47:27 +01:00
|
|
|
#include "llvm/Target/TargetIntrinsicInfo.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2018-09-14 19:08:02 +02:00
|
|
|
#include "SDNodeDbgValue.h"
|
2017-10-11 00:33:29 +02:00
|
|
|
#include <cstdint>
|
|
|
|
#include <iterator>
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
2015-09-18 19:57:28 +02:00
|
|
|
/// Hidden command-line flag (-dag-dump-verbose) that requests extra detail
/// when SelectionDAG nodes are dumped (e.g. by SelectionDAG::dump()).
/// cl::Hidden keeps it out of the standard -help listing; the default is
/// false (cl::opt<bool> value-initializes).
static cl::opt<bool>
VerboseDAGDumping("dag-dump-verbose", cl::Hidden,
                  cl::desc("Display more information when dumping selection "
                           "DAG nodes."));
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
std::string SDNode::getOperationName(const SelectionDAG *G) const {
|
|
|
|
switch (getOpcode()) {
|
|
|
|
default:
|
|
|
|
if (getOpcode() < ISD::BUILTIN_OP_END)
|
|
|
|
return "<<Unknown DAG Node>>";
|
|
|
|
if (isMachineOpcode()) {
|
|
|
|
if (G)
|
2014-08-05 04:39:49 +02:00
|
|
|
if (const TargetInstrInfo *TII = G->getSubtarget().getInstrInfo())
|
2012-03-13 06:47:27 +01:00
|
|
|
if (getMachineOpcode() < TII->getNumOpcodes())
|
2020-01-28 20:23:46 +01:00
|
|
|
return std::string(TII->getName(getMachineOpcode()));
|
2012-03-13 06:47:27 +01:00
|
|
|
return "<<Unknown Machine Node #" + utostr(getOpcode()) + ">>";
|
|
|
|
}
|
|
|
|
if (G) {
|
|
|
|
const TargetLowering &TLI = G->getTargetLoweringInfo();
|
|
|
|
const char *Name = TLI.getTargetNodeName(getOpcode());
|
|
|
|
if (Name) return Name;
|
|
|
|
return "<<Unknown Target Node #" + utostr(getOpcode()) + ">>";
|
|
|
|
}
|
|
|
|
return "<<Unknown Node #" + utostr(getOpcode()) + ">>";
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
case ISD::DELETED_NODE: return "<<Deleted Node!>>";
|
|
|
|
#endif
|
|
|
|
case ISD::PREFETCH: return "Prefetch";
|
|
|
|
case ISD::ATOMIC_FENCE: return "AtomicFence";
|
|
|
|
case ISD::ATOMIC_CMP_SWAP: return "AtomicCmpSwap";
|
IR: add "cmpxchg weak" variant to support permitted failure.
This commit adds a weak variant of the cmpxchg operation, as described
in C++11. A cmpxchg instruction with this modifier is permitted to
fail to store, even if the comparison indicated it should.
As a result, cmpxchg instructions must return a flag indicating
success in addition to their original iN value loaded. Thus, for
uniformity *all* cmpxchg instructions now return "{ iN, i1 }". The
second flag is 1 when the store succeeded.
At the DAG level, a new ATOMIC_CMP_SWAP_WITH_SUCCESS node has been
added as the natural representation for the new cmpxchg instructions.
It is a strong cmpxchg.
By default this gets Expanded to the existing ATOMIC_CMP_SWAP during
Legalization, so existing backends should see no change in behaviour.
If they wish to deal with the enhanced node instead, they can call
setOperationAction on it. Beware: as a node with 2 results, it cannot
be selected from TableGen.
Currently, no use is made of the extra information provided in this
patch. Test updates are almost entirely adapting the input IR to the
new scheme.
Summary for out of tree users:
------------------------------
+ Legacy Bitcode files are upgraded during read.
+ Legacy assembly IR files will be invalid.
+ Front-ends must adapt to different type for "cmpxchg".
+ Backends should be unaffected by default.
llvm-svn: 210903
2014-06-13 16:24:07 +02:00
|
|
|
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: return "AtomicCmpSwapWithSuccess";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ATOMIC_SWAP: return "AtomicSwap";
|
|
|
|
case ISD::ATOMIC_LOAD_ADD: return "AtomicLoadAdd";
|
|
|
|
case ISD::ATOMIC_LOAD_SUB: return "AtomicLoadSub";
|
|
|
|
case ISD::ATOMIC_LOAD_AND: return "AtomicLoadAnd";
|
2018-02-12 18:03:11 +01:00
|
|
|
case ISD::ATOMIC_LOAD_CLR: return "AtomicLoadClr";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ATOMIC_LOAD_OR: return "AtomicLoadOr";
|
|
|
|
case ISD::ATOMIC_LOAD_XOR: return "AtomicLoadXor";
|
|
|
|
case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
|
|
|
|
case ISD::ATOMIC_LOAD_MIN: return "AtomicLoadMin";
|
|
|
|
case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
|
|
|
|
case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
|
|
|
|
case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
|
2019-01-22 19:36:06 +01:00
|
|
|
case ISD::ATOMIC_LOAD_FADD: return "AtomicLoadFAdd";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ATOMIC_LOAD: return "AtomicLoad";
|
|
|
|
case ISD::ATOMIC_STORE: return "AtomicStore";
|
|
|
|
case ISD::PCMARKER: return "PCMarker";
|
|
|
|
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
|
|
|
|
case ISD::SRCVALUE: return "SrcValue";
|
|
|
|
case ISD::MDNODE_SDNODE: return "MDNode";
|
|
|
|
case ISD::EntryToken: return "EntryToken";
|
|
|
|
case ISD::TokenFactor: return "TokenFactor";
|
|
|
|
case ISD::AssertSext: return "AssertSext";
|
|
|
|
case ISD::AssertZext: return "AssertZext";
|
[SDAG] Add new AssertAlign ISD node.
Summary:
- AssertAlign node records the guaranteed alignment on its source node,
where these alignments are retrieved from alignment attributes in LLVM
IR. These tracked alignments could help DAG combining and lowering
generating efficient code.
- In this patch, the basic support of AssertAlign node is added. So far,
we only generate AssertAlign nodes on return values from intrinsic
calls.
- Addressing selection in AMDGPU is revised accordingly to capture the
new (base + offset) patterns.
Reviewers: arsenm, bogner
Subscribers: jvesely, wdng, nhaehnle, tpr, hiraditya, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D81711
2020-05-25 05:20:57 +02:00
|
|
|
case ISD::AssertAlign: return "AssertAlign";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
case ISD::BasicBlock: return "BasicBlock";
|
|
|
|
case ISD::VALUETYPE: return "ValueType";
|
|
|
|
case ISD::Register: return "Register";
|
|
|
|
case ISD::RegisterMask: return "RegisterMask";
|
2014-01-25 03:02:55 +01:00
|
|
|
case ISD::Constant:
|
|
|
|
if (cast<ConstantSDNode>(this)->isOpaque())
|
|
|
|
return "OpaqueConstant";
|
|
|
|
return "Constant";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ConstantFP: return "ConstantFP";
|
|
|
|
case ISD::GlobalAddress: return "GlobalAddress";
|
|
|
|
case ISD::GlobalTLSAddress: return "GlobalTLSAddress";
|
|
|
|
case ISD::FrameIndex: return "FrameIndex";
|
|
|
|
case ISD::JumpTable: return "JumpTable";
|
|
|
|
case ISD::GLOBAL_OFFSET_TABLE: return "GLOBAL_OFFSET_TABLE";
|
|
|
|
case ISD::RETURNADDR: return "RETURNADDR";
|
2016-10-13 00:13:19 +02:00
|
|
|
case ISD::ADDROFRETURNADDR: return "ADDROFRETURNADDR";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FRAMEADDR: return "FRAMEADDR";
|
[COFF, ARM64] Implement Intrinsic.sponentry for AArch64
Summary: This patch adds Intrinsic.sponentry. This intrinsic is required to correctly support setjmp for AArch64 Windows platform.
Patch by: Yin Ma (yinma@codeaurora.org)
Reviewers: mgrang, ssijaric, eli.friedman, TomTan, mstorsjo, rnk, compnerd, efriedma
Reviewed By: efriedma
Subscribers: efriedma, javed.absar, kristof.beyls, chrib, llvm-commits
Differential Revision: https://reviews.llvm.org/D53996
llvm-svn: 345909
2018-11-02 00:22:25 +01:00
|
|
|
case ISD::SPONENTRY: return "SPONENTRY";
|
2016-06-19 14:37:52 +02:00
|
|
|
case ISD::LOCAL_RECOVER: return "LOCAL_RECOVER";
|
2014-05-06 18:51:25 +02:00
|
|
|
case ISD::READ_REGISTER: return "READ_REGISTER";
|
|
|
|
case ISD::WRITE_REGISTER: return "WRITE_REGISTER";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FRAME_TO_ARGS_OFFSET: return "FRAME_TO_ARGS_OFFSET";
|
Add ISD::EH_DWARF_CFA, simplify @llvm.eh.dwarf.cfa on Mips, fix on PowerPC
LLVM has an @llvm.eh.dwarf.cfa intrinsic, used to lower the GCC-compatible
__builtin_dwarf_cfa() builtin. As pointed out in PR26761, this is currently
broken on PowerPC (and likely on ARM as well). Currently, @llvm.eh.dwarf.cfa is
lowered using:
ADD(FRAMEADDR, FRAME_TO_ARGS_OFFSET)
where FRAME_TO_ARGS_OFFSET defaults to the constant zero. On x86,
FRAME_TO_ARGS_OFFSET is lowered to 2*SlotSize. This setup, however, does not
work for PowerPC. Because of the way that the stack layout works, the canonical
frame address is not exactly (FRAMEADDR + FRAME_TO_ARGS_OFFSET) on PowerPC
(there is a lower save-area offset as well), so it is not just a matter of
implementing FRAME_TO_ARGS_OFFSET for PowerPC (unless we redefine its
semantics -- We can do that, since it is currently used only for
@llvm.eh.dwarf.cfa lowering, but the better to directly lower the CFA construct
itself (since it can be easily represented as a fixed-offset FrameIndex)). Mips
currently does this, but by using a custom lowering for ADD that specifically
recognizes the (FRAMEADDR, FRAME_TO_ARGS_OFFSET) pattern.
This change introduces a ISD::EH_DWARF_CFA node, which by default expands using
the existing logic, but can be directly lowered by the target. Mips is updated
to use this method (which simplifies its implementation, and I suspect makes it
more robust), and updates PowerPC to do the same.
Fixes PR26761.
Differential Revision: https://reviews.llvm.org/D24038
llvm-svn: 280350
2016-09-01 12:28:47 +02:00
|
|
|
case ISD::EH_DWARF_CFA: return "EH_DWARF_CFA";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::EH_RETURN: return "EH_RETURN";
|
|
|
|
case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
|
|
|
|
case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
|
2015-07-17 00:34:16 +02:00
|
|
|
case ISD::EH_SJLJ_SETUP_DISPATCH: return "EH_SJLJ_SETUP_DISPATCH";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ConstantPool: return "ConstantPool";
|
2012-08-08 00:37:05 +02:00
|
|
|
case ISD::TargetIndex: return "TargetIndex";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::ExternalSymbol: return "ExternalSymbol";
|
|
|
|
case ISD::BlockAddress: return "BlockAddress";
|
|
|
|
case ISD::INTRINSIC_WO_CHAIN:
|
|
|
|
case ISD::INTRINSIC_VOID:
|
|
|
|
case ISD::INTRINSIC_W_CHAIN: {
|
|
|
|
unsigned OpNo = getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
|
|
|
|
unsigned IID = cast<ConstantSDNode>(getOperand(OpNo))->getZExtValue();
|
|
|
|
if (IID < Intrinsic::num_intrinsics)
|
2021-06-14 14:52:29 +02:00
|
|
|
return Intrinsic::getBaseName((Intrinsic::ID)IID).str();
|
2019-01-18 21:06:13 +01:00
|
|
|
else if (!G)
|
|
|
|
return "Unknown intrinsic";
|
2012-03-13 06:47:27 +01:00
|
|
|
else if (const TargetIntrinsicInfo *TII = G->getTarget().getIntrinsicInfo())
|
|
|
|
return TII->getName(IID);
|
|
|
|
llvm_unreachable("Invalid intrinsic ID");
|
|
|
|
}
|
|
|
|
|
|
|
|
case ISD::BUILD_VECTOR: return "BUILD_VECTOR";
|
2014-01-25 03:02:55 +01:00
|
|
|
case ISD::TargetConstant:
|
|
|
|
if (cast<ConstantSDNode>(this)->isOpaque())
|
|
|
|
return "OpaqueTargetConstant";
|
|
|
|
return "TargetConstant";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::TargetConstantFP: return "TargetConstantFP";
|
|
|
|
case ISD::TargetGlobalAddress: return "TargetGlobalAddress";
|
|
|
|
case ISD::TargetGlobalTLSAddress: return "TargetGlobalTLSAddress";
|
|
|
|
case ISD::TargetFrameIndex: return "TargetFrameIndex";
|
|
|
|
case ISD::TargetJumpTable: return "TargetJumpTable";
|
|
|
|
case ISD::TargetConstantPool: return "TargetConstantPool";
|
|
|
|
case ISD::TargetExternalSymbol: return "TargetExternalSymbol";
|
2015-06-22 19:46:53 +02:00
|
|
|
case ISD::MCSymbol: return "MCSymbol";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::TargetBlockAddress: return "TargetBlockAddress";
|
|
|
|
|
|
|
|
case ISD::CopyToReg: return "CopyToReg";
|
|
|
|
case ISD::CopyFromReg: return "CopyFromReg";
|
|
|
|
case ISD::UNDEF: return "undef";
|
Add support for (expressing) vscale.
In LLVM IR, vscale can be represented with an intrinsic. For some targets,
this is equivalent to the constexpr:
getelementptr <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1
This can be used to propagate the value in CodeGenPrepare.
In ISel we add a node that can be legalized to one or more
instructions to materialize the runtime vector length.
This patch also adds SVE CodeGen support for VSCALE, which maps this
node to RDVL instructions (for scaled multiples of 16bytes) or CNT[HSD]
instructions (scaled multiples of 2, 4, or 8 bytes, respectively).
Reviewers: rengolin, cameron.mcinally, hfinkel, sebpop, SjoerdMeijer, efriedma, lattner
Reviewed by: efriedma
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68203
2020-01-21 11:20:27 +01:00
|
|
|
case ISD::VSCALE: return "vscale";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::MERGE_VALUES: return "merge_values";
|
|
|
|
case ISD::INLINEASM: return "inlineasm";
|
2019-02-08 21:48:56 +01:00
|
|
|
case ISD::INLINEASM_BR: return "inlineasm_br";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::EH_LABEL: return "eh_label";
|
2019-05-15 23:46:05 +02:00
|
|
|
case ISD::ANNOTATION_LABEL: return "annotation_label";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::HANDLENODE: return "handlenode";
|
|
|
|
|
|
|
|
// Unary operators
|
|
|
|
case ISD::FABS: return "fabs";
|
2014-10-22 01:01:01 +02:00
|
|
|
case ISD::FMINNUM: return "fminnum";
|
2018-10-30 22:01:29 +01:00
|
|
|
case ISD::STRICT_FMINNUM: return "strict_fminnum";
|
2014-10-22 01:01:01 +02:00
|
|
|
case ISD::FMAXNUM: return "fmaxnum";
|
2018-10-30 22:01:29 +01:00
|
|
|
case ISD::STRICT_FMAXNUM: return "strict_fmaxnum";
|
2018-10-22 18:27:27 +02:00
|
|
|
case ISD::FMINNUM_IEEE: return "fminnum_ieee";
|
|
|
|
case ISD::FMAXNUM_IEEE: return "fmaxnum_ieee";
|
[NFC] Rename minnan and maxnan to minimum and maximum
Summary:
Changes all uses of minnan/maxnan to minimum/maximum
globally. These names emphasize that the semantic difference between
these operations is more than just NaN-propagation.
Reviewers: arsenm, aheejin, dschuff, javed.absar
Subscribers: jholewinski, sdardis, wdng, sbc100, jgravelle-google, jrtc27, atanasyan, llvm-commits
Differential Revision: https://reviews.llvm.org/D53112
llvm-svn: 345218
2018-10-25 00:49:55 +02:00
|
|
|
case ISD::FMINIMUM: return "fminimum";
|
2019-12-18 21:33:10 +01:00
|
|
|
case ISD::STRICT_FMINIMUM: return "strict_fminimum";
|
[NFC] Rename minnan and maxnan to minimum and maximum
Summary:
Changes all uses of minnan/maxnan to minimum/maximum
globally. These names emphasize that the semantic difference between
these operations is more than just NaN-propagation.
Reviewers: arsenm, aheejin, dschuff, javed.absar
Subscribers: jholewinski, sdardis, wdng, sbc100, jgravelle-google, jrtc27, atanasyan, llvm-commits
Differential Revision: https://reviews.llvm.org/D53112
llvm-svn: 345218
2018-10-25 00:49:55 +02:00
|
|
|
case ISD::FMAXIMUM: return "fmaximum";
|
2019-12-18 21:33:10 +01:00
|
|
|
case ISD::STRICT_FMAXIMUM: return "strict_fmaximum";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FNEG: return "fneg";
|
|
|
|
case ISD::FSQRT: return "fsqrt";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FSQRT: return "strict_fsqrt";
|
[DAGCombiner] try to convert pow(x, 1/3) to cbrt(x)
This is a follow-up suggested in D51630 and originally proposed as an IR transform in D49040.
Copying the motivational statement by @evandro from that patch:
"This transformation helps some benchmarks in SPEC CPU2000 and CPU2006, such as 188.ammp,
447.dealII, 453.povray, and especially 300.twolf, as well as some proprietary benchmarks.
Otherwise, no regressions on x86-64 or A64."
I'm proposing to add only the minimum support for a DAG node here. Since we don't have an
LLVM IR intrinsic for cbrt, and there are no other DAG ways to create a FCBRT node yet, I
don't think we need to worry about DAG builder, legalization, a strict variant, etc. We
should be able to expand as needed when adding more functionality/transforms. For reference,
these are transform suggestions currently listed in SimplifyLibCalls.cpp:
// * cbrt(expN(X)) -> expN(x/3)
// * cbrt(sqrt(x)) -> pow(x,1/6)
// * cbrt(cbrt(x)) -> pow(x,1/9)
Also, given that we bail out on long double for now, there should not be any logical
differences between platforms (unless there's some platform out there that has pow()
but not cbrt()).
Differential Revision: https://reviews.llvm.org/D51753
llvm-svn: 342348
2018-09-16 18:50:26 +02:00
|
|
|
case ISD::FCBRT: return "fcbrt";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FSIN: return "fsin";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FSIN: return "strict_fsin";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FCOS: return "fcos";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FCOS: return "strict_fcos";
|
2013-01-29 03:32:37 +01:00
|
|
|
case ISD::FSINCOS: return "fsincos";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FTRUNC: return "ftrunc";
|
2018-11-05 16:59:49 +01:00
|
|
|
case ISD::STRICT_FTRUNC: return "strict_ftrunc";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FFLOOR: return "ffloor";
|
2018-11-05 16:59:49 +01:00
|
|
|
case ISD::STRICT_FFLOOR: return "strict_ffloor";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FCEIL: return "fceil";
|
2018-11-05 16:59:49 +01:00
|
|
|
case ISD::STRICT_FCEIL: return "strict_fceil";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FRINT: return "frint";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FRINT: return "strict_frint";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FNEARBYINT: return "fnearbyint";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FNEARBYINT: return "strict_fnearbyint";
|
2013-08-08 00:49:12 +02:00
|
|
|
case ISD::FROUND: return "fround";
|
2018-11-05 16:59:49 +01:00
|
|
|
case ISD::STRICT_FROUND: return "strict_fround";
|
2020-05-26 14:24:05 +02:00
|
|
|
case ISD::FROUNDEVEN: return "froundeven";
|
|
|
|
case ISD::STRICT_FROUNDEVEN: return "strict_froundeven";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FEXP: return "fexp";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FEXP: return "strict_fexp";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FEXP2: return "fexp2";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FEXP2: return "strict_fexp2";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FLOG: return "flog";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FLOG: return "strict_flog";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FLOG2: return "flog2";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FLOG2: return "strict_flog2";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FLOG10: return "flog10";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FLOG10: return "strict_flog10";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
// Binary operators
|
|
|
|
case ISD::ADD: return "add";
|
|
|
|
case ISD::SUB: return "sub";
|
|
|
|
case ISD::MUL: return "mul";
|
|
|
|
case ISD::MULHU: return "mulhu";
|
|
|
|
case ISD::MULHS: return "mulhs";
|
2021-06-26 20:34:16 +02:00
|
|
|
case ISD::ABDS: return "abds";
|
|
|
|
case ISD::ABDU: return "abdu";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SDIV: return "sdiv";
|
|
|
|
case ISD::UDIV: return "udiv";
|
|
|
|
case ISD::SREM: return "srem";
|
|
|
|
case ISD::UREM: return "urem";
|
|
|
|
case ISD::SMUL_LOHI: return "smul_lohi";
|
|
|
|
case ISD::UMUL_LOHI: return "umul_lohi";
|
|
|
|
case ISD::SDIVREM: return "sdivrem";
|
|
|
|
case ISD::UDIVREM: return "udivrem";
|
|
|
|
case ISD::AND: return "and";
|
|
|
|
case ISD::OR: return "or";
|
|
|
|
case ISD::XOR: return "xor";
|
|
|
|
case ISD::SHL: return "shl";
|
|
|
|
case ISD::SRA: return "sra";
|
|
|
|
case ISD::SRL: return "srl";
|
|
|
|
case ISD::ROTL: return "rotl";
|
|
|
|
case ISD::ROTR: return "rotr";
|
2018-12-05 12:12:12 +01:00
|
|
|
case ISD::FSHL: return "fshl";
|
|
|
|
case ISD::FSHR: return "fshr";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FADD: return "fadd";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FADD: return "strict_fadd";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FSUB: return "fsub";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FSUB: return "strict_fsub";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FMUL: return "fmul";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FMUL: return "strict_fmul";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FDIV: return "fdiv";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FDIV: return "strict_fdiv";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FMA: return "fma";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FMA: return "strict_fma";
|
2015-02-20 23:10:33 +01:00
|
|
|
case ISD::FMAD: return "fmad";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FREM: return "frem";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FREM: return "strict_frem";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FCOPYSIGN: return "fcopysign";
|
|
|
|
case ISD::FGETSIGN: return "fgetsign";
|
2016-04-14 03:42:16 +02:00
|
|
|
case ISD::FCANONICALIZE: return "fcanonicalize";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FPOW: return "fpow";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FPOW: return "strict_fpow";
|
2015-05-15 11:03:15 +02:00
|
|
|
case ISD::SMIN: return "smin";
|
|
|
|
case ISD::SMAX: return "smax";
|
|
|
|
case ISD::UMIN: return "umin";
|
|
|
|
case ISD::UMAX: return "umax";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
case ISD::FPOWI: return "fpowi";
|
2018-02-06 23:28:15 +01:00
|
|
|
case ISD::STRICT_FPOWI: return "strict_fpowi";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SETCC: return "setcc";
|
2017-06-01 13:14:17 +02:00
|
|
|
case ISD::SETCCCARRY: return "setcccarry";
|
[FPEnv] Constrained FCmp intrinsics
This adds support for constrained floating-point comparison intrinsics.
Specifically, we add:
declare <ty2>
@llvm.experimental.constrained.fcmp(<type> <op1>, <type> <op2>,
metadata <condition code>,
metadata <exception behavior>)
declare <ty2>
@llvm.experimental.constrained.fcmps(<type> <op1>, <type> <op2>,
metadata <condition code>,
metadata <exception behavior>)
The first variant implements an IEEE "quiet" comparison (i.e. we only
get an invalid FP exception if either argument is a SNaN), while the
second variant implements an IEEE "signaling" comparison (i.e. we get
an invalid FP exception if either argument is any NaN).
The condition code is implemented as a metadata string. The same set
of predicates as for the fcmp instruction is supported (except for the
"true" and "false" predicates).
These new intrinsics are mapped by SelectionDAG codegen onto two new
ISD opcodes, ISD::STRICT_FSETCC and ISD::STRICT_FSETCCS, again
representing quiet vs. signaling comparison operations. Otherwise
those nodes look like SETCC nodes, with an additional chain argument
and result as usual for strict FP nodes. The patch includes support
for the common legalization operations for those nodes.
The patch also includes full SystemZ back-end support for the new
ISD nodes, mapping them to all available SystemZ instruction to
fully implement strict semantics (scalar and vector).
Differential Revision: https://reviews.llvm.org/D69281
2019-12-06 11:30:04 +01:00
|
|
|
case ISD::STRICT_FSETCC: return "strict_fsetcc";
|
|
|
|
case ISD::STRICT_FSETCCS: return "strict_fsetccs";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SELECT: return "select";
|
|
|
|
case ISD::VSELECT: return "vselect";
|
|
|
|
case ISD::SELECT_CC: return "select_cc";
|
|
|
|
case ISD::INSERT_VECTOR_ELT: return "insert_vector_elt";
|
|
|
|
case ISD::EXTRACT_VECTOR_ELT: return "extract_vector_elt";
|
|
|
|
case ISD::CONCAT_VECTORS: return "concat_vectors";
|
|
|
|
case ISD::INSERT_SUBVECTOR: return "insert_subvector";
|
|
|
|
case ISD::EXTRACT_SUBVECTOR: return "extract_subvector";
|
|
|
|
case ISD::SCALAR_TO_VECTOR: return "scalar_to_vector";
|
|
|
|
case ISD::VECTOR_SHUFFLE: return "vector_shuffle";
|
[IR] Introduce llvm.experimental.vector.splice intrinsic
This patch introduces a new intrinsic @llvm.experimental.vector.splice
that constructs a vector of the same type as the two input vectors,
based on a immediate where the sign of the immediate distinguishes two
variants. A positive immediate specifies an index into the first vector
and a negative immediate specifies the number of trailing elements to
extract from the first vector.
For example:
@llvm.experimental.vector.splice(<A,B,C,D>, <E,F,G,H>, 1) ==> <B, C, D, E> ; index
@llvm.experimental.vector.splice(<A,B,C,D>, <E,F,G,H>, -3) ==> <B, C, D, E> ; trailing element count
These intrinsics support both fixed and scalable vectors, where the
former is lowered to a shufflevector to maintain existing behaviour,
although while marked as experimental the recommended way to express
this operation for fixed-width vectors is to use shufflevector. For
scalable vectors where it is not possible to express a shufflevector
mask for this operation, a new ISD node has been implemented.
This is one of the named shufflevector intrinsics proposed on the
mailing-list in the RFC at [1].
Patch by Paul Walker and Cullen Rhodes.
[1] https://lists.llvm.org/pipermail/llvm-dev/2020-November/146864.html
Reviewed By: sdesmalen
Differential Revision: https://reviews.llvm.org/D94708
2021-01-08 15:06:13 +01:00
|
|
|
case ISD::VECTOR_SPLICE: return "vector_splice";
|
2019-10-18 13:48:35 +02:00
|
|
|
case ISD::SPLAT_VECTOR: return "splat_vector";
|
2021-03-10 18:46:16 +01:00
|
|
|
case ISD::SPLAT_VECTOR_PARTS: return "splat_vector_parts";
|
2021-01-15 17:46:42 +01:00
|
|
|
case ISD::VECTOR_REVERSE: return "vector_reverse";
|
[IR][SVE] Add new llvm.experimental.stepvector intrinsic
This patch adds a new llvm.experimental.stepvector intrinsic,
which takes no arguments and returns a linear integer sequence of
values of the form <0, 1, ...>. It is primarily intended for
scalable vectors, although it will work for fixed width vectors
too. It is intended that later patches will make use of this
new intrinsic when vectorising induction variables, currently only
supported for fixed width. I've added a new CreateStepVector
method to the IRBuilder, which will generate a call to this
intrinsic for scalable vectors and fall back on creating a
ConstantVector for fixed width.
For scalable vectors this intrinsic is lowered to a new ISD node
called STEP_VECTOR, which takes a single constant integer argument
as the step. During lowering this argument is set to a value of 1.
The reason for this additional argument at the codegen level is
because in future patches we will introduce various generic DAG
combines such as
mul step_vector(1), 2 -> step_vector(2)
add step_vector(1), step_vector(1) -> step_vector(2)
shl step_vector(1), 1 -> step_vector(2)
etc.
that encourage a canonical format for all targets. This hopefully
means all other targets supporting scalable vectors can benefit
from this too.
I've added cost model tests for both fixed width and scalable
vectors:
llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
as well as codegen lowering tests for fixed width and scalable
vectors:
llvm/test/CodeGen/AArch64/neon-stepvector.ll
llvm/test/CodeGen/AArch64/sve-stepvector.ll
See this thread for discussion of the intrinsic:
https://lists.llvm.org/pipermail/llvm-dev/2021-January/147943.html
2021-02-08 16:46:24 +01:00
|
|
|
case ISD::STEP_VECTOR: return "step_vector";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::CARRY_FALSE: return "carry_false";
|
|
|
|
case ISD::ADDC: return "addc";
|
|
|
|
case ISD::ADDE: return "adde";
|
2017-04-30 21:24:09 +02:00
|
|
|
case ISD::ADDCARRY: return "addcarry";
|
2020-10-13 08:18:22 +02:00
|
|
|
case ISD::SADDO_CARRY: return "saddo_carry";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SADDO: return "saddo";
|
|
|
|
case ISD::UADDO: return "uaddo";
|
|
|
|
case ISD::SSUBO: return "ssubo";
|
|
|
|
case ISD::USUBO: return "usubo";
|
|
|
|
case ISD::SMULO: return "smulo";
|
|
|
|
case ISD::UMULO: return "umulo";
|
|
|
|
case ISD::SUBC: return "subc";
|
|
|
|
case ISD::SUBE: return "sube";
|
2017-04-30 21:24:09 +02:00
|
|
|
case ISD::SUBCARRY: return "subcarry";
|
2020-10-13 08:18:22 +02:00
|
|
|
case ISD::SSUBO_CARRY: return "ssubo_carry";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SHL_PARTS: return "shl_parts";
|
|
|
|
case ISD::SRA_PARTS: return "sra_parts";
|
|
|
|
case ISD::SRL_PARTS: return "srl_parts";
|
|
|
|
|
2018-10-16 19:35:41 +02:00
|
|
|
case ISD::SADDSAT: return "saddsat";
|
2018-10-23 01:08:40 +02:00
|
|
|
case ISD::UADDSAT: return "uaddsat";
|
2018-10-29 17:54:37 +01:00
|
|
|
case ISD::SSUBSAT: return "ssubsat";
|
|
|
|
case ISD::USUBSAT: return "usubsat";
|
[Intrinsic] Add sshl.sat/ushl.sat, saturated shift intrinsics.
Summary:
This patch adds two intrinsics, llvm.sshl.sat and llvm.ushl.sat,
which perform signed and unsigned saturating left shift,
respectively.
These are useful for implementing the Embedded-C fixed point
support in Clang, originally discussed in
http://lists.llvm.org/pipermail/llvm-dev/2018-August/125433.html
and
http://lists.llvm.org/pipermail/cfe-dev/2018-May/058019.html
Reviewers: leonardchan, craig.topper, bjope, jdoerfert
Subscribers: hiraditya, jdoerfert, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D83216
2020-07-16 17:02:04 +02:00
|
|
|
case ISD::SSHLSAT: return "sshlsat";
|
|
|
|
case ISD::USHLSAT: return "ushlsat";
|
2019-05-21 21:17:19 +02:00
|
|
|
|
2018-12-12 07:29:14 +01:00
|
|
|
case ISD::SMULFIX: return "smulfix";
|
2019-05-21 21:17:19 +02:00
|
|
|
case ISD::SMULFIXSAT: return "smulfixsat";
|
2019-02-04 18:18:11 +01:00
|
|
|
case ISD::UMULFIX: return "umulfix";
|
[Intrinsic] Add the llvm.umul.fix.sat intrinsic
Summary:
Add an intrinsic that takes 2 unsigned integers with
the scale of them provided as the third argument and
performs fixed point multiplication on them. The
result is saturated and clamped between the largest and
smallest representable values of the first 2 operands.
This is a part of implementing fixed point arithmetic
in clang where some of the more complex operations
will be implemented as intrinsics.
Patch by: leonardchan, bjope
Reviewers: RKSimon, craig.topper, bevinh, leonardchan, lebedev.ri, spatel
Reviewed By: leonardchan
Subscribers: ychen, wuzish, nemanjai, MaskRay, jsji, jdoerfert, Ka-Ka, hiraditya, rjmccall, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D57836
llvm-svn: 371308
2019-09-07 14:16:14 +02:00
|
|
|
case ISD::UMULFIXSAT: return "umulfixsat";
|
2018-10-16 19:35:41 +02:00
|
|
|
|
2020-01-08 15:05:03 +01:00
|
|
|
case ISD::SDIVFIX: return "sdivfix";
|
2019-12-16 15:25:52 +01:00
|
|
|
case ISD::SDIVFIXSAT: return "sdivfixsat";
|
2020-01-08 15:05:03 +01:00
|
|
|
case ISD::UDIVFIX: return "udivfix";
|
2019-12-16 15:25:52 +01:00
|
|
|
case ISD::UDIVFIXSAT: return "udivfixsat";
|
2020-01-08 15:05:03 +01:00
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
// Conversion operators.
|
|
|
|
case ISD::SIGN_EXTEND: return "sign_extend";
|
|
|
|
case ISD::ZERO_EXTEND: return "zero_extend";
|
|
|
|
case ISD::ANY_EXTEND: return "any_extend";
|
|
|
|
case ISD::SIGN_EXTEND_INREG: return "sign_extend_inreg";
|
2014-07-10 14:32:32 +02:00
|
|
|
case ISD::ANY_EXTEND_VECTOR_INREG: return "any_extend_vector_inreg";
|
|
|
|
case ISD::SIGN_EXTEND_VECTOR_INREG: return "sign_extend_vector_inreg";
|
2014-07-09 12:58:18 +02:00
|
|
|
case ISD::ZERO_EXTEND_VECTOR_INREG: return "zero_extend_vector_inreg";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::TRUNCATE: return "truncate";
|
|
|
|
case ISD::FP_ROUND: return "fp_round";
|
2019-05-13 15:23:30 +02:00
|
|
|
case ISD::STRICT_FP_ROUND: return "strict_fp_round";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FP_EXTEND: return "fp_extend";
|
2019-05-13 15:23:30 +02:00
|
|
|
case ISD::STRICT_FP_EXTEND: return "strict_fp_extend";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
case ISD::SINT_TO_FP: return "sint_to_fp";
|
2019-10-16 21:24:47 +02:00
|
|
|
case ISD::STRICT_SINT_TO_FP: return "strict_sint_to_fp";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::UINT_TO_FP: return "uint_to_fp";
|
2019-10-16 21:24:47 +02:00
|
|
|
case ISD::STRICT_UINT_TO_FP: return "strict_uint_to_fp";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FP_TO_SINT: return "fp_to_sint";
|
2019-08-28 18:33:36 +02:00
|
|
|
case ISD::STRICT_FP_TO_SINT: return "strict_fp_to_sint";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::FP_TO_UINT: return "fp_to_uint";
|
2019-08-28 18:33:36 +02:00
|
|
|
case ISD::STRICT_FP_TO_UINT: return "strict_fp_to_uint";
|
Add intrinsics for saturating float to int casts
This patch adds support for the fptoui.sat and fptosi.sat intrinsics,
which provide basically the same functionality as the existing fptoui
and fptosi instructions, but will saturate (or return 0 for NaN) on
values unrepresentable in the target type, instead of returning
poison. Related mailing list discussion can be found at:
https://groups.google.com/d/msg/llvm-dev/cgDFaBmCnDQ/CZAIMj4IBAAJ
The intrinsics have overloaded source and result type and support
vector operands:
i32 @llvm.fptoui.sat.i32.f32(float %f)
i100 @llvm.fptoui.sat.i100.f64(double %f)
<4 x i32> @llvm.fptoui.sat.v4i32.v4f16(half %f)
// etc
On the SelectionDAG layer two new ISD opcodes are added,
FP_TO_UINT_SAT and FP_TO_SINT_SAT. These opcodes have two operands
and one result. The second operand is an integer constant specifying
the scalar saturation width. The idea here is that initially the
second operand and the scalar width of the result type are the same,
but they may change during type legalization. For example:
i19 @llvm.fptsi.sat.i19.f32(float %f)
// builds
i19 fp_to_sint_sat f, 19
// type legalizes (through integer result promotion)
i32 fp_to_sint_sat f, 19
I went for this approach, because saturated conversion does not
compose well. There is no good way of "adjusting" a saturating
conversion to i32 into one to i19 short of saturating twice.
Specifying the saturation width separately allows directly saturating
to the correct width.
There are two baseline expansions for the fp_to_xint_sat opcodes. If
the integer bounds can be exactly represented in the float type and
fminnum/fmaxnum are legal, we can expand to something like:
f = fmaxnum f, FP(MIN)
f = fminnum f, FP(MAX)
i = fptoxi f
i = select f uo f, 0, i # unnecessary if unsigned as 0 = MIN
If the bounds cannot be exactly represented, we expand to something
like this instead:
i = fptoxi f
i = select f ult FP(MIN), MIN, i
i = select f ogt FP(MAX), MAX, i
i = select f uo f, 0, i # unnecessary if unsigned as 0 = MIN
It should be noted that this expansion assumes a non-trapping fptoxi.
Initial tests are for AArch64, x86_64 and ARM. This exercises all of
the scalar and vector legalization. ARM is included to test float
softening.
Original patch by @nikic and @ebevhan (based on D54696).
Differential Revision: https://reviews.llvm.org/D54749
2020-12-17 21:33:32 +01:00
|
|
|
case ISD::FP_TO_SINT_SAT: return "fp_to_sint_sat";
|
|
|
|
case ISD::FP_TO_UINT_SAT: return "fp_to_uint_sat";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::BITCAST: return "bitcast";
|
2013-11-15 02:34:59 +01:00
|
|
|
case ISD::ADDRSPACECAST: return "addrspacecast";
|
2014-07-17 12:51:23 +02:00
|
|
|
case ISD::FP16_TO_FP: return "fp16_to_fp";
|
2020-02-12 05:24:51 +01:00
|
|
|
case ISD::STRICT_FP16_TO_FP: return "strict_fp16_to_fp";
|
2014-07-17 12:51:23 +02:00
|
|
|
case ISD::FP_TO_FP16: return "fp_to_fp16";
|
2020-02-12 05:24:51 +01:00
|
|
|
case ISD::STRICT_FP_TO_FP16: return "strict_fp_to_fp16";
|
2019-05-16 15:15:27 +02:00
|
|
|
case ISD::LROUND: return "lround";
|
2019-10-07 15:20:00 +02:00
|
|
|
case ISD::STRICT_LROUND: return "strict_lround";
|
2019-05-16 15:15:27 +02:00
|
|
|
case ISD::LLROUND: return "llround";
|
2019-10-07 15:20:00 +02:00
|
|
|
case ISD::STRICT_LLROUND: return "strict_llround";
|
2019-05-28 22:47:44 +02:00
|
|
|
case ISD::LRINT: return "lrint";
|
2019-10-07 15:20:00 +02:00
|
|
|
case ISD::STRICT_LRINT: return "strict_lrint";
|
2019-05-28 22:47:44 +02:00
|
|
|
case ISD::LLRINT: return "llrint";
|
2019-10-07 15:20:00 +02:00
|
|
|
case ISD::STRICT_LLRINT: return "strict_llrint";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
// Control flow instructions
|
|
|
|
case ISD::BR: return "br";
|
|
|
|
case ISD::BRIND: return "brind";
|
|
|
|
case ISD::BR_JT: return "br_jt";
|
|
|
|
case ISD::BRCOND: return "brcond";
|
|
|
|
case ISD::BR_CC: return "br_cc";
|
|
|
|
case ISD::CALLSEQ_START: return "callseq_start";
|
|
|
|
case ISD::CALLSEQ_END: return "callseq_end";
|
|
|
|
|
2015-08-28 01:27:47 +02:00
|
|
|
// EH instructions
|
|
|
|
case ISD::CATCHRET: return "catchret";
|
|
|
|
case ISD::CLEANUPRET: return "cleanupret";
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
// Other operators
|
|
|
|
case ISD::LOAD: return "load";
|
|
|
|
case ISD::STORE: return "store";
|
2014-12-04 10:40:44 +01:00
|
|
|
case ISD::MLOAD: return "masked_load";
|
|
|
|
case ISD::MSTORE: return "masked_store";
|
2015-04-30 10:38:48 +02:00
|
|
|
case ISD::MGATHER: return "masked_gather";
|
|
|
|
case ISD::MSCATTER: return "masked_scatter";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::VAARG: return "vaarg";
|
|
|
|
case ISD::VACOPY: return "vacopy";
|
|
|
|
case ISD::VAEND: return "vaend";
|
|
|
|
case ISD::VASTART: return "vastart";
|
|
|
|
case ISD::DYNAMIC_STACKALLOC: return "dynamic_stackalloc";
|
|
|
|
case ISD::EXTRACT_ELEMENT: return "extract_element";
|
|
|
|
case ISD::BUILD_PAIR: return "build_pair";
|
|
|
|
case ISD::STACKSAVE: return "stacksave";
|
|
|
|
case ISD::STACKRESTORE: return "stackrestore";
|
|
|
|
case ISD::TRAP: return "trap";
|
2012-05-14 20:58:10 +02:00
|
|
|
case ISD::DEBUGTRAP: return "debugtrap";
|
2020-10-21 11:11:25 +02:00
|
|
|
case ISD::UBSANTRAP: return "ubsantrap";
|
2012-09-06 11:17:37 +02:00
|
|
|
case ISD::LIFETIME_START: return "lifetime.start";
|
|
|
|
case ISD::LIFETIME_END: return "lifetime.end";
|
[CSSPGO] MIR target-independent pseudo instruction for pseudo-probe intrinsic
This change introduces a MIR target-independent pseudo instruction corresponding to the IR intrinsic llvm.pseudoprobe for pseudo-probe block instrumentation. Please refer to https://reviews.llvm.org/D86193 for the whole story.
An `llvm.pseudoprobe` intrinsic call will be lowered into a target-independent operation named `PSEUDO_PROBE`. Given the following instrumented IR,
```
define internal void @foo2(i32 %x, void (i32)* %f) !dbg !4 {
bb0:
%cmp = icmp eq i32 %x, 0
call void @llvm.pseudoprobe(i64 837061429793323041, i64 1)
br i1 %cmp, label %bb1, label %bb2
bb1:
call void @llvm.pseudoprobe(i64 837061429793323041, i64 2)
br label %bb3
bb2:
call void @llvm.pseudoprobe(i64 837061429793323041, i64 3)
br label %bb3
bb3:
call void @llvm.pseudoprobe(i64 837061429793323041, i64 4)
ret void
}
```
the corresponding MIR is shown below. Note that block `bb3` is duplicated into `bb1` and `bb2` where its probe is duplicated too. This allows for an accurate execution count to be collected for `bb3`, which is basically the sum of the counts of `bb1` and `bb2`.
```
bb.0.bb0:
frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
TEST32rr killed renamable $edi, renamable $edi, implicit-def $eflags
PSEUDO_PROBE 837061429793323041, 1, 0
$edi = MOV32ri 1, debug-location !13; test.c:0
JCC_1 %bb.1, 4, implicit $eflags
bb.2.bb2:
PSEUDO_PROBE 837061429793323041, 3, 0
PSEUDO_PROBE 837061429793323041, 4, 0
$rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp
RETQ
bb.1.bb1:
PSEUDO_PROBE 837061429793323041, 2, 0
PSEUDO_PROBE 837061429793323041, 4, 0
$rax = frame-destroy POP64r implicit-def $rsp, implicit $rsp
RETQ
```
The target op PSEUDO_PROBE will be converted into a piece of binary data by the object emitter with no machine instructions generated. This is done in a different patch.
Reviewed By: wmi
Differential Revision: https://reviews.llvm.org/D86495
2020-09-10 20:21:19 +02:00
|
|
|
case ISD::PSEUDO_PROBE:
|
|
|
|
return "pseudoprobe";
|
Extend the statepoint intrinsic to allow statepoints to be marked as transitions from GC-aware code to code that is not GC-aware.
This changes the shape of the statepoint intrinsic from:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 unused, ...call args, i32 # deopt args, ...deopt args, ...gc args)
to:
@llvm.experimental.gc.statepoint(anyptr target, i32 # call args, i32 flags, ...call args, i32 # transition args, ...transition args, i32 # deopt args, ...deopt args, ...gc args)
This extension offers the backend the opportunity to insert (somewhat) arbitrary code to manage the transition from GC-aware code to code that is not GC-aware and back.
In order to support the injection of transition code, this extension wraps the STATEPOINT ISD node generated by the usual lowering with two additional nodes: GC_TRANSITION_START and GC_TRANSITION_END. The transition arguments that were passed to the intrinsic (if any) are lowered and provided as operands to these nodes and may be used by the backend during code generation.
Eventually, the lowering of the GC_TRANSITION_{START,END} nodes should be informed by the GC strategy in use for the function containing the intrinsic call; for now, these nodes are instead replaced with no-ops.
Differential Revision: http://reviews.llvm.org/D9501
llvm-svn: 236888
2015-05-08 20:07:42 +02:00
|
|
|
case ISD::GC_TRANSITION_START: return "gc_transition.start";
|
|
|
|
case ISD::GC_TRANSITION_END: return "gc_transition.end";
|
2015-12-01 12:40:55 +01:00
|
|
|
case ISD::GET_DYNAMIC_AREA_OFFSET: return "get.dynamic.area.offset";
|
[SelDag] Add FREEZE
Summary:
- Add FREEZE node to SelDag
- Lower FreezeInst (in IR) to FREEZE node
- Add Legalization for FREEZE node
Reviewers: qcolombet, bogner, efriedma, lebedev.ri, nlopes, craig.topper, arsenm
Reviewed By: lebedev.ri
Subscribers: wdng, xbolva00, Petar.Avramovic, liuz, lkail, dylanmckay, hiraditya, Jim, arsenm, craig.topper, RKSimon, spatel, lebedev.ri, regehr, trentxintong, nlopes, mkuper, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D29014
2020-03-23 05:03:13 +01:00
|
|
|
case ISD::FREEZE: return "freeze";
|
Reland [X86] Codegen for preallocated
See https://reviews.llvm.org/D74651 for the preallocated IR constructs
and LangRef changes.
In X86TargetLowering::LowerCall(), if a call is preallocated, record
each argument's offset from the stack pointer and the total stack
adjustment. Associate the call Value with an integer index. Store the
info in X86MachineFunctionInfo with the integer index as the key.
This adds two new target independent ISDOpcodes and two new target
dependent Opcodes corresponding to @llvm.call.preallocated.{setup,arg}.
The setup ISelDAG node takes in a chain and outputs a chain and a
SrcValue of the preallocated call Value. It is lowered to a target
dependent node with the SrcValue replaced with the integer index key by
looking in X86MachineFunctionInfo. In
X86TargetLowering::EmitInstrWithCustomInserter() this is lowered to an
%esp adjustment, the exact amount determined by looking in
X86MachineFunctionInfo with the integer index key.
The arg ISelDAG node takes in a chain, a SrcValue of the preallocated
call Value, and the arg index int constant. It produces a chain and the
pointer fo the arg. It is lowered to a target dependent node with the
SrcValue replaced with the integer index key by looking in
X86MachineFunctionInfo. In
X86TargetLowering::EmitInstrWithCustomInserter() this is lowered to a
lea of the stack pointer plus an offset determined by looking in
X86MachineFunctionInfo with the integer index key.
Force any function containing a preallocated call to use the frame
pointer.
Does not yet handle a setup without a call, or a conditional call.
Does not yet handle musttail. That requires a LangRef change first.
Tried to look at all references to inalloca and see if they apply to
preallocated. I've made preallocated versions of tests testing inalloca
whenever possible and when they make sense (e.g. not alloca related,
inalloca edge cases).
Aside from the tests added here, I checked that this codegen produces
correct code for something like
```
struct A {
A();
A(A&&);
~A();
};
void bar() {
foo(foo(foo(foo(foo(A(), 4), 5), 6), 7), 8);
}
```
by replacing the inalloca version of the .ll file with the appropriate
preallocated code. Running the executable produces the same results as
using the current inalloca implementation.
Reverted due to unexpectedly passing tests, added REQUIRES: asserts for reland.
Subscribers: hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77689
2020-03-16 20:32:36 +01:00
|
|
|
case ISD::PREALLOCATED_SETUP:
|
|
|
|
return "call_setup";
|
|
|
|
case ISD::PREALLOCATED_ARG:
|
|
|
|
return "call_alloc";
|
2012-03-13 06:47:27 +01:00
|
|
|
|
[FPEnv] Intrinsic for setting rounding mode
To set non-default rounding mode user usually calls function 'fesetround'
from standard C library. This way has some disadvantages.
* It creates unnecessary dependency on libc. On the other hand, setting
rounding mode requires few instructions and could be made by compiler.
Sometimes standard C library even is not available, like in the case of
GPU or AI cores that execute small kernels.
* Compiler could generate more effective code if it knows that a particular
call just sets rounding mode.
This change introduces new IR intrinsic, namely 'llvm.set.rounding', which
sets current rounding mode, similar to 'fesetround'. It however differs
from the latter, because it is a lower level facility:
* 'llvm.set.rounding' does not return any value, whereas 'fesetround'
returns non-zero value in the case of failure. In glibc 'fesetround'
reports failure if its argument is invalid or unsupported or if floating
point operations are unavailable on the hardware. Compiler usually knows
what core it generates code for and it can validate arguments in many
cases.
* Rounding mode is specified in 'fesetround' using constants like
'FE_TONEAREST', which are target dependent. It is inconvenient to work
with such constants at IR level.
C standard provides a target-independent way to specify rounding mode, it
is used in FLT_ROUNDS, however it does not define standard way to set
rounding mode using this encoding.
This change implements only IR intrinsic. Lowering it to machine code is
target-specific and will be implemented later. Mapping of 'fesetround'
to 'llvm.set.rounding' is also not implemented here.
Differential Revision: https://reviews.llvm.org/D74729
2020-02-03 11:44:42 +01:00
|
|
|
// Floating point environment manipulation
|
|
|
|
case ISD::FLT_ROUNDS_: return "flt_rounds";
|
|
|
|
case ISD::SET_ROUNDING: return "set_rounding";
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
// Bit manipulation
|
2017-03-14 22:26:58 +01:00
|
|
|
case ISD::ABS: return "abs";
|
2015-11-12 13:29:09 +01:00
|
|
|
case ISD::BITREVERSE: return "bitreverse";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::BSWAP: return "bswap";
|
|
|
|
case ISD::CTPOP: return "ctpop";
|
|
|
|
case ISD::CTTZ: return "cttz";
|
|
|
|
case ISD::CTTZ_ZERO_UNDEF: return "cttz_zero_undef";
|
|
|
|
case ISD::CTLZ: return "ctlz";
|
|
|
|
case ISD::CTLZ_ZERO_UNDEF: return "ctlz_zero_undef";
|
2020-09-12 20:42:18 +02:00
|
|
|
case ISD::PARITY: return "parity";
|
2017-01-10 23:38:02 +01:00
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
// Trampolines
|
|
|
|
case ISD::INIT_TRAMPOLINE: return "init_trampoline";
|
|
|
|
case ISD::ADJUST_TRAMPOLINE: return "adjust_trampoline";
|
|
|
|
|
|
|
|
case ISD::CONDCODE:
|
|
|
|
switch (cast<CondCodeSDNode>(this)->get()) {
|
|
|
|
default: llvm_unreachable("Unknown setcc condition!");
|
|
|
|
case ISD::SETOEQ: return "setoeq";
|
|
|
|
case ISD::SETOGT: return "setogt";
|
|
|
|
case ISD::SETOGE: return "setoge";
|
|
|
|
case ISD::SETOLT: return "setolt";
|
|
|
|
case ISD::SETOLE: return "setole";
|
|
|
|
case ISD::SETONE: return "setone";
|
|
|
|
|
|
|
|
case ISD::SETO: return "seto";
|
|
|
|
case ISD::SETUO: return "setuo";
|
2015-08-11 23:10:07 +02:00
|
|
|
case ISD::SETUEQ: return "setueq";
|
2012-03-13 06:47:27 +01:00
|
|
|
case ISD::SETUGT: return "setugt";
|
|
|
|
case ISD::SETUGE: return "setuge";
|
|
|
|
case ISD::SETULT: return "setult";
|
|
|
|
case ISD::SETULE: return "setule";
|
|
|
|
case ISD::SETUNE: return "setune";
|
|
|
|
|
|
|
|
case ISD::SETEQ: return "seteq";
|
|
|
|
case ISD::SETGT: return "setgt";
|
|
|
|
case ISD::SETGE: return "setge";
|
|
|
|
case ISD::SETLT: return "setlt";
|
|
|
|
case ISD::SETLE: return "setle";
|
|
|
|
case ISD::SETNE: return "setne";
|
|
|
|
|
|
|
|
case ISD::SETTRUE: return "settrue";
|
|
|
|
case ISD::SETTRUE2: return "settrue2";
|
|
|
|
case ISD::SETFALSE: return "setfalse";
|
|
|
|
case ISD::SETFALSE2: return "setfalse2";
|
|
|
|
}
|
2017-05-09 12:43:25 +02:00
|
|
|
case ISD::VECREDUCE_FADD: return "vecreduce_fadd";
|
2020-10-04 07:06:54 +02:00
|
|
|
case ISD::VECREDUCE_SEQ_FADD: return "vecreduce_seq_fadd";
|
2017-05-09 12:43:25 +02:00
|
|
|
case ISD::VECREDUCE_FMUL: return "vecreduce_fmul";
|
2020-10-04 07:06:54 +02:00
|
|
|
case ISD::VECREDUCE_SEQ_FMUL: return "vecreduce_seq_fmul";
|
2017-05-09 12:43:25 +02:00
|
|
|
case ISD::VECREDUCE_ADD: return "vecreduce_add";
|
|
|
|
case ISD::VECREDUCE_MUL: return "vecreduce_mul";
|
|
|
|
case ISD::VECREDUCE_AND: return "vecreduce_and";
|
|
|
|
case ISD::VECREDUCE_OR: return "vecreduce_or";
|
|
|
|
case ISD::VECREDUCE_XOR: return "vecreduce_xor";
|
|
|
|
case ISD::VECREDUCE_SMAX: return "vecreduce_smax";
|
|
|
|
case ISD::VECREDUCE_SMIN: return "vecreduce_smin";
|
|
|
|
case ISD::VECREDUCE_UMAX: return "vecreduce_umax";
|
|
|
|
case ISD::VECREDUCE_UMIN: return "vecreduce_umin";
|
|
|
|
case ISD::VECREDUCE_FMAX: return "vecreduce_fmax";
|
|
|
|
case ISD::VECREDUCE_FMIN: return "vecreduce_fmin";
|
2020-12-09 11:36:30 +01:00
|
|
|
|
|
|
|
// Vector Predication
|
|
|
|
#define BEGIN_REGISTER_VP_SDNODE(SDID, LEGALARG, NAME, ...) \
|
|
|
|
case ISD::SDID: \
|
|
|
|
return #NAME;
|
|
|
|
#include "llvm/IR/VPIntrinsics.def"
|
2012-03-13 06:47:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return a printable tag describing an indexed load/store addressing mode,
/// or an empty string for unindexed (or unknown) modes.
const char *SDNode::getIndexedModeName(ISD::MemIndexedMode AM) {
  switch (AM) {
  case ISD::PRE_INC:
    return "<pre-inc>";
  case ISD::PRE_DEC:
    return "<pre-dec>";
  case ISD::POST_INC:
    return "<post-inc>";
  case ISD::POST_DEC:
    return "<post-dec>";
  default:
    return "";
  }
}
|
|
|
|
|
2015-12-04 02:31:59 +01:00
|
|
|
/// Build a Printable that emits a stable, human-readable identifier for
/// \p Node: the persistent id (e.g. "t42") in asserts builds, or the node's
/// address otherwise.
static Printable PrintNodeId(const SDNode &Node) {
  const SDNode *N = &Node;
  return Printable([N](raw_ostream &OS) {
#ifndef NDEBUG
    OS << 't' << N->PersistentId;
#else
    OS << (const void *)N;
#endif
  });
}
|
|
|
|
|
2018-03-14 22:52:13 +01:00
|
|
|
// Print the MMO with more information from the SelectionDAG.
|
|
|
|
static void printMemOperand(raw_ostream &OS, const MachineMemOperand &MMO,
|
2018-04-12 14:59:50 +02:00
|
|
|
const MachineFunction *MF, const Module *M,
|
|
|
|
const MachineFrameInfo *MFI,
|
|
|
|
const TargetInstrInfo *TII, LLVMContext &Ctx) {
|
|
|
|
ModuleSlotTracker MST(M);
|
|
|
|
if (MF)
|
|
|
|
MST.incorporateFunction(MF->getFunction());
|
2018-03-14 22:52:13 +01:00
|
|
|
SmallVector<StringRef, 0> SSNs;
|
2020-01-10 11:18:11 +01:00
|
|
|
MMO.print(OS, MST, SSNs, Ctx, MFI, TII);
|
2018-04-12 14:59:50 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Convenience overload: when \p G is available, derive the machine
/// function, module, frame info and instruction info from it; otherwise
/// print with a fresh LLVMContext and no machine-level information.
static void printMemOperand(raw_ostream &OS, const MachineMemOperand &MMO,
                            const SelectionDAG *G) {
  if (!G) {
    LLVMContext Ctx;
    return printMemOperand(OS, MMO, /*MF=*/nullptr, /*M=*/nullptr,
                           /*MFI=*/nullptr, /*TII=*/nullptr, Ctx);
  }
  const MachineFunction *MF = &G->getMachineFunction();
  return printMemOperand(OS, MMO, MF, MF->getFunction().getParent(),
                         &MF->getFrameInfo(), G->getSubtarget().getInstrInfo(),
                         *G->getContext());
}
|
|
|
|
|
2017-10-15 16:32:27 +02:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
2016-01-29 21:50:44 +01:00
|
|
|
LLVM_DUMP_METHOD void SDNode::dump() const { dump(nullptr); }
|
2017-10-11 00:33:29 +02:00
|
|
|
|
2017-01-28 03:02:38 +01:00
|
|
|
LLVM_DUMP_METHOD void SDNode::dump(const SelectionDAG *G) const {
|
2012-03-13 06:47:27 +01:00
|
|
|
print(dbgs(), G);
|
2017-04-01 03:26:17 +02:00
|
|
|
dbgs() << '\n';
|
2012-03-13 06:47:27 +01:00
|
|
|
}
|
2017-01-28 03:02:38 +01:00
|
|
|
#endif
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
/// Print this node's comma-separated result value types to \p OS, using
/// "ch" for the chain type (MVT::Other).
void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
  for (unsigned Idx = 0, NumVals = getNumValues(); Idx != NumVals; ++Idx) {
    if (Idx)
      OS << ",";
    EVT VT = getValueType(Idx);
    if (VT == MVT::Other)
      OS << "ch";
    else
      OS << VT.getEVTString();
  }
}
|
|
|
|
|
|
|
|
void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
|
2017-10-13 17:41:40 +02:00
|
|
|
if (getFlags().hasNoUnsignedWrap())
|
|
|
|
OS << " nuw";
|
|
|
|
|
|
|
|
if (getFlags().hasNoSignedWrap())
|
|
|
|
OS << " nsw";
|
|
|
|
|
|
|
|
if (getFlags().hasExact())
|
|
|
|
OS << " exact";
|
|
|
|
|
|
|
|
if (getFlags().hasNoNaNs())
|
|
|
|
OS << " nnan";
|
|
|
|
|
|
|
|
if (getFlags().hasNoInfs())
|
|
|
|
OS << " ninf";
|
|
|
|
|
|
|
|
if (getFlags().hasNoSignedZeros())
|
|
|
|
OS << " nsz";
|
|
|
|
|
|
|
|
if (getFlags().hasAllowReciprocal())
|
|
|
|
OS << " arcp";
|
|
|
|
|
|
|
|
if (getFlags().hasAllowContract())
|
|
|
|
OS << " contract";
|
|
|
|
|
Fast Math Flag mapping into SDNode
Summary: Adding support for Fast flags in the SDNode to leverage fast math sub flag usage.
Reviewers: spatel, arsenm, jbhateja, hfinkel, escha, qcolombet, echristo, wristow, javed.absar
Reviewed By: spatel
Subscribers: llvm-commits, rampitec, nhaehnle, tstellar, FarhanaAleen, nemanjai, javed.absar, jbhateja, hfinkel, wdng
Differential Revision: https://reviews.llvm.org/D45710
llvm-svn: 331547
2018-05-04 20:48:20 +02:00
|
|
|
if (getFlags().hasApproximateFuncs())
|
|
|
|
OS << " afn";
|
|
|
|
|
|
|
|
if (getFlags().hasAllowReassociation())
|
|
|
|
OS << " reassoc";
|
|
|
|
|
[FPEnv] Default NoFPExcept SDNodeFlag to false
The NoFPExcept bit in SDNodeFlags currently defaults to true, unlike all
other such flags. This is a problem, because it implies that all code that
transforms SDNodes without copying flags can introduce a correctness bug,
not just a missed optimization.
This patch changes the default to false. This makes it necessary to move
setting the (No)FPExcept flag for constrained intrinsics from the
visitConstrainedIntrinsic routine to the generic visit routine at the
place where the other flags are set, or else the intersectFlagsWith
call would erase the NoFPExcept flag again.
In order to avoid making non-strict FP code worse, whenever
SelectionDAGISel::SelectCodeCommon matches on a set of orignal nodes
none of which can raise FP exceptions, it will preserve this property
on all results nodes generated, by setting the NoFPExcept flag on
those result nodes that would otherwise be considered as raising
an FP exception.
To check whether or not an SD node should be considered as raising
an FP exception, the following logic applies:
- For machine nodes, check the mayRaiseFPException property of
the underlying MI instruction
- For regular nodes, check isStrictFPOpcode
- For target nodes, check a newly introduced isTargetStrictFPOpcode
The latter is implemented by reserving a range of target opcodes,
similarly to how memory opcodes are identified. (Note that there a
bit of a quirk in identifying target nodes that are both memory nodes
and strict FP nodes. To simplify the logic, right now all target memory
nodes are automatically also considered strict FP nodes -- this could
be fixed by adding one more range.)
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D71841
2020-01-02 16:54:49 +01:00
|
|
|
if (getFlags().hasNoFPExcept())
|
|
|
|
OS << " nofpexcept";
|
2019-12-17 03:04:25 +01:00
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
|
|
|
|
if (!MN->memoperands_empty()) {
|
|
|
|
OS << "<";
|
|
|
|
OS << "Mem:";
|
|
|
|
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
|
|
|
|
e = MN->memoperands_end(); i != e; ++i) {
|
2018-03-14 22:52:13 +01:00
|
|
|
printMemOperand(OS, **i, G);
|
2014-03-02 13:27:27 +01:00
|
|
|
if (std::next(i) != e)
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << " ";
|
|
|
|
}
|
|
|
|
OS << ">";
|
|
|
|
}
|
|
|
|
} else if (const ShuffleVectorSDNode *SVN =
|
|
|
|
dyn_cast<ShuffleVectorSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
|
|
|
|
int Idx = SVN->getMaskElt(i);
|
|
|
|
if (i) OS << ",";
|
|
|
|
if (Idx < 0)
|
|
|
|
OS << "u";
|
|
|
|
else
|
|
|
|
OS << Idx;
|
|
|
|
}
|
|
|
|
OS << ">";
|
|
|
|
} else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
|
|
|
|
OS << '<' << CSDN->getAPIntValue() << '>';
|
|
|
|
} else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
|
2017-10-11 00:33:29 +02:00
|
|
|
if (&CSDN->getValueAPF().getSemantics() == &APFloat::IEEEsingle())
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
|
2017-10-11 00:33:29 +02:00
|
|
|
else if (&CSDN->getValueAPF().getSemantics() == &APFloat::IEEEdouble())
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << '<' << CSDN->getValueAPF().convertToDouble() << '>';
|
|
|
|
else {
|
|
|
|
OS << "<APFloat(";
|
2017-01-28 03:02:38 +01:00
|
|
|
CSDN->getValueAPF().bitcastToAPInt().print(OS, false);
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << ")>";
|
|
|
|
}
|
|
|
|
} else if (const GlobalAddressSDNode *GADN =
|
|
|
|
dyn_cast<GlobalAddressSDNode>(this)) {
|
|
|
|
int64_t offset = GADN->getOffset();
|
|
|
|
OS << '<';
|
2014-01-09 03:29:41 +01:00
|
|
|
GADN->getGlobal()->printAsOperand(OS);
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << '>';
|
|
|
|
if (offset > 0)
|
|
|
|
OS << " + " << offset;
|
|
|
|
else
|
|
|
|
OS << " " << offset;
|
|
|
|
if (unsigned int TF = GADN->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
|
|
|
} else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
|
|
|
|
OS << "<" << FIDN->getIndex() << ">";
|
|
|
|
} else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
|
|
|
|
OS << "<" << JTDN->getIndex() << ">";
|
|
|
|
if (unsigned int TF = JTDN->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
|
|
|
} else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
|
|
|
|
int offset = CP->getOffset();
|
|
|
|
if (CP->isMachineConstantPoolEntry())
|
|
|
|
OS << "<" << *CP->getMachineCPVal() << ">";
|
|
|
|
else
|
|
|
|
OS << "<" << *CP->getConstVal() << ">";
|
|
|
|
if (offset > 0)
|
|
|
|
OS << " + " << offset;
|
|
|
|
else
|
|
|
|
OS << " " << offset;
|
|
|
|
if (unsigned int TF = CP->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
2012-08-08 00:37:05 +02:00
|
|
|
} else if (const TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(this)) {
|
|
|
|
OS << "<" << TI->getIndex() << '+' << TI->getOffset() << ">";
|
|
|
|
if (unsigned TF = TI->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
2012-03-13 06:47:27 +01:00
|
|
|
} else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
|
|
|
|
if (LBB)
|
|
|
|
OS << LBB->getName() << " ";
|
|
|
|
OS << (const void*)BBDN->getBasicBlock() << ">";
|
|
|
|
} else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
|
2017-11-28 13:42:37 +01:00
|
|
|
OS << ' ' << printReg(R->getReg(),
|
2014-08-05 04:39:49 +02:00
|
|
|
G ? G->getSubtarget().getRegisterInfo() : nullptr);
|
2012-03-13 06:47:27 +01:00
|
|
|
} else if (const ExternalSymbolSDNode *ES =
|
|
|
|
dyn_cast<ExternalSymbolSDNode>(this)) {
|
|
|
|
OS << "'" << ES->getSymbol() << "'";
|
|
|
|
if (unsigned int TF = ES->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
|
|
|
} else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
|
|
|
|
if (M->getValue())
|
|
|
|
OS << "<" << M->getValue() << ">";
|
|
|
|
else
|
|
|
|
OS << "<null>";
|
|
|
|
} else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
|
|
|
|
if (MD->getMD())
|
|
|
|
OS << "<" << MD->getMD() << ">";
|
|
|
|
else
|
|
|
|
OS << "<null>";
|
|
|
|
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
|
|
|
|
OS << ":" << N->getVT().getEVTString();
|
|
|
|
}
|
|
|
|
else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
|
2018-03-14 22:52:13 +01:00
|
|
|
OS << "<";
|
|
|
|
|
|
|
|
printMemOperand(OS, *LD->getMemOperand(), G);
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
bool doExt = true;
|
|
|
|
switch (LD->getExtensionType()) {
|
|
|
|
default: doExt = false; break;
|
|
|
|
case ISD::EXTLOAD: OS << ", anyext"; break;
|
|
|
|
case ISD::SEXTLOAD: OS << ", sext"; break;
|
|
|
|
case ISD::ZEXTLOAD: OS << ", zext"; break;
|
|
|
|
}
|
|
|
|
if (doExt)
|
|
|
|
OS << " from " << LD->getMemoryVT().getEVTString();
|
|
|
|
|
|
|
|
const char *AM = getIndexedModeName(LD->getAddressingMode());
|
|
|
|
if (*AM)
|
|
|
|
OS << ", " << AM;
|
|
|
|
|
|
|
|
OS << ">";
|
|
|
|
} else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
|
2018-03-14 22:52:13 +01:00
|
|
|
OS << "<";
|
|
|
|
printMemOperand(OS, *ST->getMemOperand(), G);
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
if (ST->isTruncatingStore())
|
|
|
|
OS << ", trunc to " << ST->getMemoryVT().getEVTString();
|
|
|
|
|
|
|
|
const char *AM = getIndexedModeName(ST->getAddressingMode());
|
|
|
|
if (*AM)
|
|
|
|
OS << ", " << AM;
|
|
|
|
|
2019-01-24 08:51:34 +01:00
|
|
|
OS << ">";
|
|
|
|
} else if (const MaskedLoadSDNode *MLd = dyn_cast<MaskedLoadSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
|
|
|
|
printMemOperand(OS, *MLd->getMemOperand(), G);
|
|
|
|
|
|
|
|
bool doExt = true;
|
|
|
|
switch (MLd->getExtensionType()) {
|
|
|
|
default: doExt = false; break;
|
|
|
|
case ISD::EXTLOAD: OS << ", anyext"; break;
|
|
|
|
case ISD::SEXTLOAD: OS << ", sext"; break;
|
|
|
|
case ISD::ZEXTLOAD: OS << ", zext"; break;
|
|
|
|
}
|
|
|
|
if (doExt)
|
|
|
|
OS << " from " << MLd->getMemoryVT().getEVTString();
|
|
|
|
|
2019-11-21 15:56:37 +01:00
|
|
|
const char *AM = getIndexedModeName(MLd->getAddressingMode());
|
|
|
|
if (*AM)
|
|
|
|
OS << ", " << AM;
|
|
|
|
|
2019-01-24 08:51:34 +01:00
|
|
|
if (MLd->isExpandingLoad())
|
|
|
|
OS << ", expanding";
|
|
|
|
|
|
|
|
OS << ">";
|
|
|
|
} else if (const MaskedStoreSDNode *MSt = dyn_cast<MaskedStoreSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
printMemOperand(OS, *MSt->getMemOperand(), G);
|
|
|
|
|
|
|
|
if (MSt->isTruncatingStore())
|
|
|
|
OS << ", trunc to " << MSt->getMemoryVT().getEVTString();
|
|
|
|
|
2019-11-21 15:56:37 +01:00
|
|
|
const char *AM = getIndexedModeName(MSt->getAddressingMode());
|
|
|
|
if (*AM)
|
|
|
|
OS << ", " << AM;
|
|
|
|
|
2019-01-24 08:51:34 +01:00
|
|
|
if (MSt->isCompressingStore())
|
|
|
|
OS << ", compressing";
|
|
|
|
|
2020-12-09 11:49:43 +01:00
|
|
|
OS << ">";
|
|
|
|
} else if (const auto *MGather = dyn_cast<MaskedGatherSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
printMemOperand(OS, *MGather->getMemOperand(), G);
|
|
|
|
|
|
|
|
bool doExt = true;
|
|
|
|
switch (MGather->getExtensionType()) {
|
|
|
|
default: doExt = false; break;
|
|
|
|
case ISD::EXTLOAD: OS << ", anyext"; break;
|
|
|
|
case ISD::SEXTLOAD: OS << ", sext"; break;
|
|
|
|
case ISD::ZEXTLOAD: OS << ", zext"; break;
|
|
|
|
}
|
|
|
|
if (doExt)
|
|
|
|
OS << " from " << MGather->getMemoryVT().getEVTString();
|
|
|
|
|
|
|
|
auto Signed = MGather->isIndexSigned() ? "signed" : "unsigned";
|
|
|
|
auto Scaled = MGather->isIndexScaled() ? "scaled" : "unscaled";
|
|
|
|
OS << ", " << Signed << " " << Scaled << " offset";
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << ">";
|
2020-11-11 11:34:49 +01:00
|
|
|
} else if (const auto *MScatter = dyn_cast<MaskedScatterSDNode>(this)) {
|
|
|
|
OS << "<";
|
|
|
|
printMemOperand(OS, *MScatter->getMemOperand(), G);
|
|
|
|
|
|
|
|
if (MScatter->isTruncatingStore())
|
|
|
|
OS << ", trunc to " << MScatter->getMemoryVT().getEVTString();
|
|
|
|
|
|
|
|
auto Signed = MScatter->isIndexSigned() ? "signed" : "unsigned";
|
|
|
|
auto Scaled = MScatter->isIndexScaled() ? "scaled" : "unscaled";
|
|
|
|
OS << ", " << Signed << " " << Scaled << " offset";
|
|
|
|
|
|
|
|
OS << ">";
|
|
|
|
} else if (const MemSDNode *M = dyn_cast<MemSDNode>(this)) {
|
2018-03-14 22:52:13 +01:00
|
|
|
OS << "<";
|
|
|
|
printMemOperand(OS, *M->getMemOperand(), G);
|
|
|
|
OS << ">";
|
2012-03-13 06:47:27 +01:00
|
|
|
} else if (const BlockAddressSDNode *BA =
|
|
|
|
dyn_cast<BlockAddressSDNode>(this)) {
|
2012-09-12 23:43:09 +02:00
|
|
|
int64_t offset = BA->getOffset();
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << "<";
|
2014-01-09 03:29:41 +01:00
|
|
|
BA->getBlockAddress()->getFunction()->printAsOperand(OS, false);
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << ", ";
|
2014-01-09 03:29:41 +01:00
|
|
|
BA->getBlockAddress()->getBasicBlock()->printAsOperand(OS, false);
|
2012-03-13 06:47:27 +01:00
|
|
|
OS << ">";
|
2012-09-12 23:43:09 +02:00
|
|
|
if (offset > 0)
|
|
|
|
OS << " + " << offset;
|
|
|
|
else
|
|
|
|
OS << " " << offset;
|
2012-03-13 06:47:27 +01:00
|
|
|
if (unsigned int TF = BA->getTargetFlags())
|
|
|
|
OS << " [TF=" << TF << ']';
|
2013-11-15 02:34:59 +01:00
|
|
|
} else if (const AddrSpaceCastSDNode *ASC =
|
|
|
|
dyn_cast<AddrSpaceCastSDNode>(this)) {
|
|
|
|
OS << '['
|
|
|
|
<< ASC->getSrcAddressSpace()
|
|
|
|
<< " -> "
|
|
|
|
<< ASC->getDestAddressSpace()
|
|
|
|
<< ']';
|
2019-02-21 13:59:36 +01:00
|
|
|
} else if (const LifetimeSDNode *LN = dyn_cast<LifetimeSDNode>(this)) {
|
|
|
|
if (LN->hasOffset())
|
|
|
|
OS << "<" << LN->getOffset() << " to " << LN->getOffset() + LN->getSize() << ">";
|
2012-03-13 06:47:27 +01:00
|
|
|
}
|
|
|
|
|
2015-09-18 19:57:28 +02:00
|
|
|
if (VerboseDAGDumping) {
|
|
|
|
if (unsigned Order = getIROrder())
|
|
|
|
OS << " [ORD=" << Order << ']';
|
2012-03-13 06:47:27 +01:00
|
|
|
|
2015-09-18 19:57:28 +02:00
|
|
|
if (getNodeId() != -1)
|
|
|
|
OS << " [ID=" << getNodeId() << ']';
|
2018-03-05 16:12:21 +01:00
|
|
|
if (!(isa<ConstantSDNode>(this) || (isa<ConstantFPSDNode>(this))))
|
2019-01-18 21:06:13 +01:00
|
|
|
OS << " # D:" << isDivergent();
|
|
|
|
|
|
|
|
if (G && !G->GetDbgValues(this).empty()) {
|
|
|
|
OS << " [NoOfDbgValues=" << G->GetDbgValues(this).size() << ']';
|
|
|
|
for (SDDbgValue *Dbg : G->GetDbgValues(this))
|
|
|
|
if (!Dbg->isInvalidated())
|
|
|
|
Dbg->print(OS);
|
|
|
|
} else if (getHasDebugValue())
|
|
|
|
OS << " [NoOfDbgValues>0]";
|
2015-09-18 19:57:28 +02:00
|
|
|
}
|
2012-03-13 06:47:27 +01:00
|
|
|
}
|
|
|
|
|
2019-01-18 21:06:13 +01:00
|
|
|
/// Print a human-readable description of this SDDbgValue to \p OS.
/// The output lists the emission order, state flags, every debug location
/// operand, the variable name, and (in asserts builds) the DIExpression.
LLVM_DUMP_METHOD void SDDbgValue::print(raw_ostream &OS) const {
  OS << " DbgVal(Order=" << getOrder() << ')';

  // State flags are appended as parenthesized tags.
  if (isInvalidated())
    OS << "(Invalidated)";
  if (isEmitted())
    OS << "(Emitted)";

  // Print the comma-separated list of location operands.
  OS << "(";
  bool NeedSep = false;
  for (const SDDbgOperand &DbgOp : getLocationOps()) {
    if (NeedSep)
      OS << ", ";
    NeedSep = true;
    switch (DbgOp.getKind()) {
    case SDDbgOperand::SDNODE:
      // A null node can occur for a dangling debug value.
      if (DbgOp.getSDNode())
        OS << "SDNODE=" << PrintNodeId(*DbgOp.getSDNode()) << ':'
           << DbgOp.getResNo();
      else
        OS << "SDNODE";
      break;
    case SDDbgOperand::CONST:
      OS << "CONST";
      break;
    case SDDbgOperand::FRAMEIX:
      OS << "FRAMEIX=" << DbgOp.getFrameIx();
      break;
    case SDDbgOperand::VREG:
      OS << "VREG=" << DbgOp.getVReg();
      break;
    }
  }
  OS << ")";

  if (isIndirect())
    OS << "(Indirect)";
  if (isVariadic())
    OS << "(Variadic)";

  // The debug variable this value describes.
  OS << ":\"" << Var->getName() << '"';
#ifndef NDEBUG
  // In asserts builds, also dump a non-trivial DIExpression.
  if (Expr->getNumElements())
    Expr->dump();
#endif
}
|
|
|
|
|
2019-01-18 21:06:13 +01:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
|
|
|
/// Dump this SDDbgValue to dbgs(), followed by a newline.
/// Invalidated values are intentionally suppressed.
LLVM_DUMP_METHOD void SDDbgValue::dump() const {
  if (!isInvalidated()) {
    print(dbgs());
    dbgs() << "\n";
  }
}
|
|
|
|
#endif
|
|
|
|
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
/// Return true if this node is so simple that we should just print it inline
|
|
|
|
/// if it appears as an operand.
|
2019-01-18 21:06:13 +01:00
|
|
|
static bool shouldPrintInline(const SDNode &Node, const SelectionDAG *G) {
|
|
|
|
// Avoid lots of cluttering when inline printing nodes with associated
|
|
|
|
// DbgValues in verbose mode.
|
|
|
|
if (VerboseDAGDumping && G && !G->GetDbgValues(&Node).empty())
|
|
|
|
return false;
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
if (Node.getOpcode() == ISD::EntryToken)
|
|
|
|
return false;
|
|
|
|
return Node.getNumOperands() == 0;
|
|
|
|
}
|
|
|
|
|
2017-10-15 16:32:27 +02:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
2012-03-13 06:47:27 +01:00
|
|
|
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
for (const SDValue &Op : N->op_values()) {
|
2019-01-18 21:06:13 +01:00
|
|
|
if (shouldPrintInline(*Op.getNode(), G))
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
continue;
|
2015-06-26 21:37:02 +02:00
|
|
|
if (Op.getNode()->hasOneUse())
|
|
|
|
DumpNodes(Op.getNode(), indent+2, G);
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
}
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
dbgs().indent(indent);
|
|
|
|
N->dump(G);
|
|
|
|
}
|
|
|
|
|
2016-01-29 21:50:44 +01:00
|
|
|
/// Dump the whole DAG to dbgs(): all nodes reachable and unreachable from the
/// root, followed (in verbose mode) by the recorded SDDbgValues.
LLVM_DUMP_METHOD void SelectionDAG::dump() const {
  dbgs() << "SelectionDAG has " << AllNodes.size() << " nodes:\n";

  // First dump nodes that are not reached by the DumpNodes recursion from the
  // root: multi-use nodes, dead inline-printable nodes, etc.
  for (const SDNode &Node : allnodes()) {
    bool IsRoot = &Node == getRoot().getNode();
    bool DumpedViaUser = Node.hasOneUse();
    bool InlineAndLive = shouldPrintInline(Node, this) && !Node.use_empty();
    if (!DumpedViaUser && !IsRoot && !InlineAndLive)
      DumpNodes(&Node, 2, this);
  }

  // Then dump the tree hanging off the root, if there is one.
  if (SDNode *Root = getRoot().getNode())
    DumpNodes(Root, 2, this);
  dbgs() << "\n";

  if (VerboseDAGDumping) {
    // Regular SDDbgValues, with a header only when the list is non-empty.
    if (DbgBegin() != DbgEnd())
      dbgs() << "SDDbgValues:\n";
    for (auto *Dbg : make_range(DbgBegin(), DbgEnd()))
      Dbg->dump();
    // Byval-parameter SDDbgValues, same pattern.
    if (ByvalParmDbgBegin() != ByvalParmDbgEnd())
      dbgs() << "Byval SDDbgValues:\n";
    for (auto *Dbg : make_range(ByvalParmDbgBegin(), ByvalParmDbgEnd()))
      Dbg->dump();
  }
  dbgs() << "\n";
}
|
2017-01-28 03:02:38 +01:00
|
|
|
#endif
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
/// Print this node without its operands: "tN: <types> = <opcode><details>".
void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const {
  // Node id and result types come first.
  OS << PrintNodeId(*this) << ": ";
  print_types(OS, G);
  // Then the operation name and any node-specific details.
  OS << " = " << getOperationName(G);
  print_details(OS, G);
}
|
|
|
|
|
SelectionDAGDumper: Print simple operands inline.
Print simple operands inline instead of their pointer/value number.
Simple operands are SDNodes without predecessors like Constant(FP), Register,
UNDEF. This unifies the behaviour with dumpr() which was already doing this.
Previously:
t0: ch = EntryToken
t1: i64 = Register %vreg0
t2: i64,ch = CopyFromReg t0, t1
t3: i64 = Constant<1>
t4: i64 = add t2, t3
t5: i64 = Constant<2>
t6: i64 = add t2, t5
t10: i64 = undef
t11: i8,ch = load t0, t2, t10<LD1[%tmp81]>
t12: i8,ch = load t0, t4, t10<LD1[%tmp10]>
t13: i8,ch = load t0, t6, t10<LD1[%tmp12]>
Now:
t0: ch = EntryToken
t2: i64,ch = CopyFromReg t0, Register:i64 %vreg0
t4: i64 = add t2, Constant:i64<1>
t6: i64 = add t2, Constant:i64<2>
t11: i8,ch = load<LD1[%tmp81]> t0, t2, undef:i64
t12: i8,ch = load<LD1[%tmp10]> t0, t4, undef:i64
t13: i8,ch = load<LD1[%tmp12]> t0, t6, undef:i64
Differential Revision: http://reviews.llvm.org/D12567
llvm-svn: 248628
2015-09-26 00:27:02 +02:00
|
|
|
/// Print one operand \p Value of a node to \p OS.
/// Simple leaf operands are printed inline (opcode, types, details); other
/// operands are printed as a node-id reference with an optional result number.
/// Returns true iff the operand was printed inline.
static bool printOperand(raw_ostream &OS, const SelectionDAG *G,
                         const SDValue Value) {
  SDNode *Node = Value.getNode();

  // A null operand can occur, e.g. during type legalization of f128.
  if (!Node) {
    OS << "<null>";
    return false;
  }

  if (shouldPrintInline(*Node, G)) {
    OS << Value->getOperationName(G) << ':';
    Value->print_types(OS, G);
    Value->print_details(OS, G);
    return true;
  }

  // Otherwise reference the node by id, qualified by the result number when
  // it is not the first result.
  OS << PrintNodeId(*Node);
  if (unsigned ResNo = Value.getResNo())
    OS << ':' << ResNo;
  return false;
}
|
|
|
|
|
2017-10-15 16:32:27 +02:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
2017-10-11 00:33:29 +02:00
|
|
|
using VisitedSDNodeSet = SmallPtrSet<const SDNode *, 32>;
|
|
|
|
|
2012-03-13 06:47:27 +01:00
|
|
|
/// Recursively dump \p N and its operand subtree to \p OS, visiting every
/// node at most once (tracked via \p once).
static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
                       const SelectionDAG *G, VisitedSDNodeSet &once) {
  // If we've been here before, return now.
  if (!once.insert(N).second)
    return;

  // Dump the current SDNode, but don't end the line yet.
  OS.indent(indent);
  N->printr(OS, G);

  // Having printed this SDNode, walk the children:
  unsigned NumOps = N->getNumOperands();
  for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
    // Separate operands with ", "; the first gets just a space.
    OS << (OpNo ? "," : "") << " ";
    const SDValue Operand = N->getOperand(OpNo);
    // Operands printed inline need no line of their own below.
    if (printOperand(OS, G, Operand))
      once.insert(Operand.getNode());
  }
  OS << "\n";

  // Dump children that have grandchildren on their own line(s).
  for (const SDValue &Operand : N->op_values())
    DumpNodesr(OS, Operand.getNode(), indent + 2, G, once);
}
|
|
|
|
|
2017-01-28 03:02:38 +01:00
|
|
|
/// Dump this node and its operand subtree to dbgs() without DAG context.
LLVM_DUMP_METHOD void SDNode::dumpr() const {
  VisitedSDNodeSet Visited;
  DumpNodesr(dbgs(), this, 0, nullptr, Visited);
}
|
|
|
|
|
2017-01-28 03:02:38 +01:00
|
|
|
/// Dump this node and its operand subtree to dbgs(), using \p G for extra
/// target-specific detail where available.
LLVM_DUMP_METHOD void SDNode::dumpr(const SelectionDAG *G) const {
  VisitedSDNodeSet Visited;
  DumpNodesr(dbgs(), this, 0, G, Visited);
}
|
2017-01-28 03:02:38 +01:00
|
|
|
#endif
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
/// Print \p N and then recurse into its non-chain operands, stopping after
/// \p depth levels. Chain (MVT::Other) operands are deliberately skipped to
/// keep the output focused on value dependencies.
static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
                                  const SelectionDAG *G, unsigned depth,
                                  unsigned indent) {
  if (depth == 0)
    return;

  OS.indent(indent);

  N->print(OS, G);

  // NOTE: the original code also had `if (depth < 1) return;` here. Since
  // `depth` is unsigned and the depth-zero case already returned above, that
  // branch was unreachable and has been removed.

  for (const SDValue &Op : N->op_values()) {
    // Don't follow chain operands.
    if (Op.getValueType() == MVT::Other)
      continue;
    OS << '\n';
    printrWithDepthHelper(OS, Op.getNode(), G, depth - 1, indent + 2);
  }
}
|
|
|
|
|
|
|
|
/// Print this node and its operand tree to \p OS, limited to \p depth levels.
void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
                             unsigned depth) const {
  // Delegate to the recursive helper, starting at indent column zero.
  printrWithDepthHelper(OS, this, G, depth, 0);
}
|
|
|
|
|
|
|
|
/// Print this node and its operand tree to \p OS with a bounded depth.
void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
  // Don't print impossibly deep things.
  constexpr unsigned MaxDepth = 10;
  printrWithDepth(OS, G, MaxDepth);
}
|
|
|
|
|
2017-10-15 16:32:27 +02:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
2017-01-28 03:02:38 +01:00
|
|
|
/// Dump this node and its operand tree to dbgs(), limited to \p depth levels.
LLVM_DUMP_METHOD
void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
  printrWithDepth(dbgs(), G, depth);
}
|
|
|
|
|
2017-01-28 03:02:38 +01:00
|
|
|
/// Dump this node and its operand tree to dbgs() with a bounded depth.
LLVM_DUMP_METHOD void SDNode::dumprFull(const SelectionDAG *G) const {
  // Don't print impossibly deep things.
  constexpr unsigned MaxDepth = 10;
  dumprWithDepth(G, MaxDepth);
}
|
2017-01-28 03:02:38 +01:00
|
|
|
#endif
|
2012-03-13 06:47:27 +01:00
|
|
|
|
|
|
|
/// Print this node together with its operands (one line), followed by its
/// debug location if one is attached.
void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
  printr(OS, G);

  // Operands are separated by ", "; the first is preceded by a single space.
  unsigned NumOps = getNumOperands();
  for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
    OS << (OpNo ? ", " : " ");
    printOperand(OS, G, getOperand(OpNo));
  }

  // Append the source location, if any, at the end of the line.
  if (DebugLoc DL = getDebugLoc()) {
    OS << ", ";
    DL.print(OS);
  }
}
|