2012-02-18 13:03:15 +01:00
|
|
|
//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
|
2008-11-07 11:59:00 +01:00
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2008-11-07 11:59:00 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements the XCoreTargetLowering class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "XCoreISelLowering.h"
|
|
|
|
#include "XCore.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "XCoreMachineFunctionInfo.h"
|
2008-11-07 11:59:00 +01:00
|
|
|
#include "XCoreSubtarget.h"
|
2012-12-03 17:50:05 +01:00
|
|
|
#include "XCoreTargetMachine.h"
|
|
|
|
#include "XCoreTargetObjectFile.h"
|
2008-11-07 11:59:00 +01:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2010-02-23 14:25:07 +01:00
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
2008-11-07 11:59:00 +01:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2018-03-29 19:21:10 +02:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/CallingConv.h"
|
2013-12-02 11:18:31 +01:00
|
|
|
#include "llvm/IR/Constants.h"
|
2013-01-02 12:36:10 +01:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2019-12-11 16:55:26 +01:00
|
|
|
#include "llvm/IR/IntrinsicsXCore.h"
|
2008-11-07 11:59:00 +01:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-08 22:53:28 +02:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2017-04-28 07:31:46 +02:00
|
|
|
#include "llvm/Support/KnownBits.h"
|
2009-07-25 02:23:56 +02:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2013-05-04 19:24:33 +02:00
|
|
|
#include <algorithm>
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 04:41:26 +02:00
|
|
|
#define DEBUG_TYPE "xcore-lower"
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
const char *XCoreTargetLowering::
|
2011-02-25 22:41:48 +01:00
|
|
|
getTargetNodeName(unsigned Opcode) const
|
2008-11-07 11:59:00 +01:00
|
|
|
{
|
2015-05-07 23:33:59 +02:00
|
|
|
switch ((XCoreISD::NodeType)Opcode)
|
2008-11-07 11:59:00 +01:00
|
|
|
{
|
2015-05-07 23:33:59 +02:00
|
|
|
case XCoreISD::FIRST_NUMBER : break;
|
2008-11-07 11:59:00 +01:00
|
|
|
case XCoreISD::BL : return "XCoreISD::BL";
|
|
|
|
case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
|
|
|
|
case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
|
|
|
|
case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
|
2014-02-27 18:47:54 +01:00
|
|
|
case XCoreISD::LDWSP : return "XCoreISD::LDWSP";
|
2008-11-07 11:59:00 +01:00
|
|
|
case XCoreISD::STWSP : return "XCoreISD::STWSP";
|
|
|
|
case XCoreISD::RETSP : return "XCoreISD::RETSP";
|
2009-10-08 19:14:57 +02:00
|
|
|
case XCoreISD::LADD : return "XCoreISD::LADD";
|
|
|
|
case XCoreISD::LSUB : return "XCoreISD::LSUB";
|
2010-03-10 14:27:10 +01:00
|
|
|
case XCoreISD::LMUL : return "XCoreISD::LMUL";
|
2010-03-10 12:41:08 +01:00
|
|
|
case XCoreISD::MACCU : return "XCoreISD::MACCU";
|
|
|
|
case XCoreISD::MACCS : return "XCoreISD::MACCS";
|
2013-01-25 22:20:28 +01:00
|
|
|
case XCoreISD::CRC8 : return "XCoreISD::CRC8";
|
2010-02-23 14:25:07 +01:00
|
|
|
case XCoreISD::BR_JT : return "XCoreISD::BR_JT";
|
|
|
|
case XCoreISD::BR_JT32 : return "XCoreISD::BR_JT32";
|
2014-01-06 15:21:00 +01:00
|
|
|
case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
|
2014-01-06 15:21:07 +01:00
|
|
|
case XCoreISD::EH_RETURN : return "XCoreISD::EH_RETURN";
|
2013-11-12 11:11:26 +01:00
|
|
|
case XCoreISD::MEMBARRIER : return "XCoreISD::MEMBARRIER";
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|
2015-05-07 23:33:59 +02:00
|
|
|
return nullptr;
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|
|
|
|
|
2015-02-02 18:52:27 +01:00
|
|
|
/// Construct the XCore lowering object: register the single i32 register
/// class, then declare, per ISD opcode, whether the generic DAG node is
/// Legal, must be Expanded by generic code, or is Custom-lowered by this
/// file's Lower* hooks.  Also installs DAG-combine callbacks and code-size
/// limits for memory-op expansion.
XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.  All values are handled as i32 in GRRegs.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

  // 64bit — i64 add/sub and 32x32->64 multiplies are custom-lowered; the
  // remaining wide ops are expanded by generic legalization.
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);
  setOperationAction(ISD::BITREVERSE , MVT::i32, Legal);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads: i1 loads are promoted; sign-extending i8 loads and
  // zero-extending i16 loads are expanded (see LowerLOAD for the misaligned
  // cases handled below).
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no OP.
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Cap how many individual stores the generic memset/memcpy/memmove
  // expansions may emit before falling back to a library call.
  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(Align(2));
  setPrefFunctionAlignment(Align(4));
}
|
|
|
|
|
2013-10-11 12:26:29 +02:00
|
|
|
/// isZExtFree - Report whether zero-extending Val to VT2 costs nothing.
/// Only an i8 load qualifies; everything else is reported as not free.
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Only values produced directly by a load can be extended for free.
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  // Both the source and destination must be simple integer types.
  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() || !VT2.isSimple() ||
      !VT2.isInteger())
    return false;

  // Of the simple integer types, only an i8 source is free to extend.
  return VT1.getSimpleVT().SimpleTy == MVT::i8;
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
/// LowerOperation - Dispatch a DAG node that was marked Custom in the
/// constructor to the matching Lower* implementation.
SDValue XCoreTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::EH_RETURN:
    return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::LOAD:
    return LowerLOAD(Op, DAG);
  case ISD::STORE:
    return LowerSTORE(Op, DAG);
  case ISD::VAARG:
    return LowerVAARG(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:
    return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:
    return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:
    return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:
    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:
    return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:
    return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:
    return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:
    return LowerATOMIC_STORE(Op, DAG);
  default:
    // Anything else should never have been marked Custom.
    llvm_unreachable("unimplemented operand");
  }
}
|
|
|
|
|
2008-12-01 12:39:25 +01:00
|
|
|
/// ReplaceNodeResults - Replace the results of node with an illegal result
|
|
|
|
/// type with new values built out of custom code.
|
|
|
|
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
|
|
|
|
SmallVectorImpl<SDValue>&Results,
|
2010-04-17 17:26:15 +02:00
|
|
|
SelectionDAG &DAG) const {
|
2008-11-07 11:59:00 +01:00
|
|
|
switch (N->getOpcode()) {
|
|
|
|
default:
|
2009-07-14 18:55:14 +02:00
|
|
|
llvm_unreachable("Don't know how to custom expand this!");
|
2008-11-14 16:59:19 +01:00
|
|
|
case ISD::ADD:
|
2008-12-01 12:39:25 +01:00
|
|
|
case ISD::SUB:
|
|
|
|
Results.push_back(ExpandADDSUB(N, DAG));
|
|
|
|
return;
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Misc Lower Operation implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-05-23 22:39:23 +02:00
|
|
|
/// Wrap a target global address GA in the XCoreISD wrapper node that matches
/// how GV is addressed: PC-relative for functions, CP-relative for globals
/// placed in the constant pool (a ".cp." section, or a local constant
/// variable), DP-relative for everything else.
SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  unsigned WrapperOpc;
  if (GV->getValueType()->isFunctionTy()) {
    WrapperOpc = XCoreISD::PCRelativeWrapper;
  } else {
    const auto *GVar = dyn_cast<GlobalVariable>(GV);
    const bool InConstantPool =
        (GV->hasSection() && GV->getSection().startswith(".cp.")) ||
        (GVar && GVar->isConstant() && GV->hasLocalLinkage());
    WrapperOpc = InConstantPool ? XCoreISD::CPRelativeWrapper
                                : XCoreISD::DPRelativeWrapper;
  }
  return DAG.getNode(WrapperOpc, dl, MVT::i32, GA);
}
|
|
|
|
|
2014-01-06 15:20:32 +01:00
|
|
|
static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL) {
|
|
|
|
if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
|
|
|
|
return true;
|
|
|
|
|
2016-01-16 21:30:46 +01:00
|
|
|
Type *ObjType = GV->getValueType();
|
2014-01-06 15:20:32 +01:00
|
|
|
if (!ObjType->isSized())
|
|
|
|
return false;
|
|
|
|
|
2015-07-09 04:09:52 +02:00
|
|
|
auto &DL = GV->getParent()->getDataLayout();
|
|
|
|
unsigned ObjSize = DL.getTypeAllocSize(ObjType);
|
2014-01-06 15:20:32 +01:00
|
|
|
return ObjSize < CodeModelLargeSize && ObjSize != 0;
|
|
|
|
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
/// Lower an ISD::GlobalAddress node.  Small objects get a direct wrapped
/// target address (with any word-aligned part of the offset folded in and
/// the remainder added at runtime).  Large objects are instead addressed
/// indirectly: a constant-pool entry holding (i8*)GV + Offset is created
/// and the address is loaded from it.
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
    // Build the constant expression GEP (i8*)GV + Offset ...
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    // ... place it in the constant pool and load the address from there.
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo());
  }
}
|
|
|
|
|
2009-11-19 00:20:42 +01:00
|
|
|
/// Lower an ISD::BlockAddress node to its target form wrapped in a
/// PC-relative wrapper.
SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc Loc(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue TargetBA = DAG.getTargetBlockAddress(BA, PtrVT);
  return DAG.getNode(XCoreISD::PCRelativeWrapper, Loc, PtrVT, TargetBA);
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
/// Lower an ISD::ConstantPool node to its target form wrapped in a
/// CP-relative wrapper.
SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  // Machine constant-pool entries and IR constants use different accessors
  // but are otherwise lowered identically.
  SDValue Res =
      CP->isMachineConstantPoolEntry()
          ? DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                      CP->getAlign(), CP->getOffset())
          : DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
                                      CP->getOffset());
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
|
|
|
|
|
2010-03-11 15:58:56 +01:00
|
|
|
/// Jump tables are emitted inline in the instruction stream on XCore.
unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
|
|
|
|
|
2010-02-23 14:25:07 +01:00
|
|
|
/// Lower an ISD::BR_JT.  Tables with at most 32 entries are branched through
/// directly with BR_JT; larger tables double the index (shift left by 1) and
/// use BR_JT32.
SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JTI, MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32)
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);

  // The scaled index below must not overflow a signed i32.
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
|
|
|
|
|
2016-06-12 17:39:02 +02:00
|
|
|
/// Load an i32 from a word-aligned Base plus an arbitrary byte Offset.
/// If the offset is itself word-aligned a single load suffices; otherwise
/// the value straddles two words, so load both surrounding aligned words
/// and reassemble the result with shifts and an OR.  Returns a merged
/// (value, chain) pair.
SDValue XCoreTargetLowering::lowerLoadWordFromAlignedBasePlusOffset(
    const SDLoc &DL, SDValue Chain, SDValue Base, int64_t Offset,
    SelectionDAG &DAG) const {
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  // Word-aligned offset: the access is already legal.
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo());
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
  int32_t HighOffset = alignTo(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  // For a global base, fold the offsets into the global address nodes
  // directly; otherwise materialize explicit ADDs.
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  // Bit distances of the requested word from each aligned word.
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo());
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo());
  // result = (Low >> LowShift) | (High << HighShift)
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  // Join the chains of the two loads so later users depend on both.
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}
|
|
|
|
|
|
|
|
/// Return true when known-bits analysis proves Value is a multiple of 4
/// (at least two trailing zero bits).
static bool isWordAligned(SDValue Value, SelectionDAG &DAG) {
  const KnownBits Bits = DAG.computeKnownBits(Value);
  return Bits.countMinTrailingZeros() >= 2;
}
|
|
|
|
|
2019-06-12 12:46:50 +02:00
|
|
|
/// Custom-lower an i32 ISD::LOAD.  Already-legal (sufficiently aligned)
/// accesses are left alone.  Otherwise, in decreasing order of preference:
/// (1) if the address is provably a word-aligned base plus constant offset,
/// use lowerLoadWordFromAlignedBasePlusOffset; (2) if aligned to 2 bytes,
/// assemble the word from two halfword loads; (3) fall back to a call to
/// the __misaligned_load runtime routine.
SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext &Context = *DAG.getContext();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");

  // Nothing to do if the target permits this access at its alignment.
  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     LD->getMemoryVT(), *LD->getMemOperand()))
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  // The two-load trick below reads bytes outside the original access, so it
  // is only applied to non-volatile loads.
  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    // Case 1a: base + constant offset where the base is provably
    // word-aligned.
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    // Case 1b: global + offset where the global itself is 4-byte aligned.
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        GV->getPointerAlignment(DAG.getDataLayout()) >= 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  // Case 2: halfword-aligned — combine a zero-extending low halfword load
  // with a high halfword load shifted into the top 16 bits.
  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain, BasePtr,
                                 LD->getPointerInfo(), MVT::i16, Align(2),
                                 LD->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High =
        DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, HighAddr,
                       LD->getPointerInfo().getWithOffset(2), MVT::i16,
                       Align(2), LD->getMemOperand()->getFlags());
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    // Join the chains of the two halfword loads.
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}
|
|
|
|
|
2019-06-12 12:46:50 +02:00
|
|
|
/// Lower an i32 STORE node. Naturally-aligned stores are left for normal
/// selection. A 2-byte-aligned store is split into two halfword truncating
/// stores; anything less aligned becomes a call to the __misaligned_store
/// runtime helper.
SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  LLVMContext &Context = *DAG.getContext();
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");

  // If the target says this access is fine at its actual alignment, do not
  // custom-lower it; returning an empty SDValue keeps the original node.
  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    // Split the word into two halfwords: the value's low 16 bits go to
    // BasePtr, the high 16 bits (after a logical shift) to BasePtr + 2.
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow =
        DAG.getTruncStore(Chain, dl, Low, BasePtr, ST->getPointerInfo(),
                          MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(
        Chain, dl, High, HighAddr, ST->getPointerInfo().getWithOffset(2),
        MVT::i16, Align(2), ST->getMemOperand()->getFlags());
    // The two halfword stores are independent (both hang off the incoming
    // chain); join their output chains with a TokenFactor.
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(Context);
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  // First argument: the destination address.
  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  // Second argument: the value to store (Entry.Ty is still IntPtrTy).
  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(Context),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  // The helper returns void, so only the output chain is of interest.
  return CallResult.second;
}
|
|
|
|
|
2010-03-10 14:20:07 +01:00
|
|
|
/// Lower a 32x32 -> 64-bit signed multiply (SMUL_LOHI) using the XCore
/// MACCS node with a zeroed accumulator, yielding the low and high halves
/// of the signed product as separate i32 results.
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc Loc(Op);
  SDValue ZeroI32 = DAG.getConstant(0, Loc, MVT::i32);
  // MACCS is used here as a plain signed multiply: the two accumulator
  // operands are zero, so the node's pair of i32 results is the product.
  SDValue ProdHi =
      DAG.getNode(XCoreISD::MACCS, Loc, DAG.getVTList(MVT::i32, MVT::i32),
                  ZeroI32, ZeroI32, Op.getOperand(0), Op.getOperand(1));
  SDValue ProdLo(ProdHi.getNode(), 1);
  // SMUL_LOHI defines result 0 as the low half and result 1 as the high half.
  SDValue Halves[] = {ProdLo, ProdHi};
  return DAG.getMergeValues(Halves, Loc);
}
|
|
|
|
|
|
|
|
/// Lower a 32x32 -> 64-bit unsigned multiply (UMUL_LOHI) using the XCore
/// LMUL node with zeroed extra operands, yielding the low and high halves
/// of the unsigned product as separate i32 results.
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc Loc(Op);
  SDValue ZeroI32 = DAG.getConstant(0, Loc, MVT::i32);
  // LMUL's last two operands are zero here, so the node's pair of i32
  // results is just the unsigned product of the first two operands.
  SDValue ProdHi =
      DAG.getNode(XCoreISD::LMUL, Loc, DAG.getVTList(MVT::i32, MVT::i32),
                  Op.getOperand(0), Op.getOperand(1), ZeroI32, ZeroI32);
  SDValue ProdLo(ProdHi.getNode(), 1);
  // UMUL_LOHI defines result 0 as the low half and result 1 as the high half.
  SDValue Halves[] = {ProdLo, ProdHi};
  return DAG.getMergeValues(Halves, Loc);
}
|
|
|
|
|
2010-03-10 18:10:35 +01:00
|
|
|
/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  // Find the inner ADD; the other operand may be the MUL (handled first
  // below) or one of the addends.
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  // No MUL found in any of the three commuted positions.
  return false;
}
|
|
|
|
|
2010-03-10 12:41:08 +01:00
|
|
|
/// Try to expand an i64 ADD whose one operand is an i64 MUL into XCore
/// multiply-accumulate nodes (MACCU/MACCS). Returns an empty SDValue when
/// the pattern does not apply, letting the caller fall back to the plain
/// add/sub expansion.
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  // One operand of the ADD must be a MUL; the other is the 64-bit addend.
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  // Split the multiply operands and the addend into 32-bit halves
  // (suffix L = low word, H = high word).
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended. A single unsigned
    // multiply-accumulate of the low words plus the addend suffices.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended. Use the signed
    // multiply-accumulate instead.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  // General case: full 64x64 low-half product. Start with MACCU on the low
  // words, then add the cross products LL*RH and LH*RL into the high word
  // (their contributions to bits 64+ are discarded).
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
|
|
|
|
|
2008-12-01 12:39:25 +01:00
|
|
|
/// Expand an i64 ADD or SUB into a pair of 32-bit operations using the
/// XCore long add/sub nodes (LADD/LSUB), which produce a carry/borrow as a
/// second result that is threaded into the high-word operation.
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        "Unknown operand to lower!");

  // Prefer the multiply-accumulate form when the ADD folds a MUL.
  if (N->getOpcode() == ISD::ADD)
    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand: low words first with a zero carry-in, then the high words with
  // the carry/borrow produced by the low operation.
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  // The carry-out of the high-word operation is not needed.
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
|
|
|
|
|
|
|
|
/// Lower VAARG: load the current vararg pointer from the va_list slot,
/// advance the slot past this argument, then load the argument itself.
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  // Load the current argument pointer out of the va_list slot.
  SDValue VAList =
      DAG.getLoad(PtrVT, dl, InChain, VAListPtr, MachinePointerInfo(SV));
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV));
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo());
}
|
|
|
|
|
|
|
|
/// Lower VASTART by storing the address of the function's vararg frame
/// slot into the memory location given by the va_list operand.
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc Loc(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  // The vararg frame index was recorded during argument lowering.
  XCoreFunctionInfo *FuncInfo = MF.getInfo<XCoreFunctionInfo>();
  SDValue FrameAddr =
      DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), MVT::i32);
  // Operand 0 is the chain; operand 1 is the pointer to the va_list.
  return DAG.getStore(Op.getOperand(0), Loc, FrameAddr, Op.getOperand(1),
                      MachinePointerInfo());
}
|
|
|
|
|
2010-04-17 17:26:15 +02:00
|
|
|
/// Lower llvm.frameaddress. The single operand is the frame depth:
/// zero means the current function's frame, one the parent's, and so on.
/// Only depth zero is supported; deeper requests produce no lowering.
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  // Read the frame register for the current function.
  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            TRI->getFrameRegister(MF), MVT::i32);
}
|
|
|
|
|
2014-01-06 15:20:53 +01:00
|
|
|
/// Lower llvm.returnaddress. The single operand is the call depth:
/// zero means the current function's return address, one the parent's,
/// and so on. Only depth zero is supported; deeper requests produce no
/// lowering.
SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *FuncInfo = MF.getInfo<XCoreFunctionInfo>();
  // Ensure the link register has a spill slot, then load the saved return
  // address back out of that fixed stack slot.
  int SpillFI = FuncInfo->createLRSpillSlot(MF);
  SDValue SlotAddr = DAG.getFrameIndex(SpillFI, MVT::i32);
  return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                     DAG.getEntryNode(), SlotAddr,
                     MachinePointerInfo::getFixedStack(MF, SpillFI));
}
|
|
|
|
|
2014-01-06 15:21:00 +01:00
|
|
|
/// Lower FRAME_TO_ARGS_OFFSET, the offset from the frame pointer to the
/// first on-stack argument (needed for correct stack adjustment during
/// unwind). The offset is unknown until the frame is finalised, so a
/// target node is emitted here and resolved later by the XCoreFTAOElim
/// pass.
SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  SDLoc Loc(Op);
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, Loc, MVT::i32);
}
|
|
|
|
|
2014-01-06 15:21:07 +01:00
|
|
|
SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents 'eh_return' gcc dwarf builtin, which is used to
  // return from exception. The general meaning is: adjust stack by OFFSET and
  // pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  // FRAME_TO_ARGS_OFFSET is resolved after frame finalisation (see
  // LowerFRAME_TO_ARGS_OFFSET).
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  // Place the computed stack pointer and the handler address into the two
  // scratch registers; both copies hang off the same incoming chain.
  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  // Emit the target EH_RETURN node consuming both register values.
  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));

}
|
|
|
|
|
2011-02-02 15:57:41 +01:00
|
|
|
/// Lower ADJUST_TRAMPOLINE. No adjustment of the trampoline is needed on
/// XCore, so the node's first operand is forwarded unchanged.
SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}
|
|
|
|
|
|
|
|
/// Lower INIT_TRAMPOLINE: materialise a small XCore code stub at the
/// trampoline address that loads the 'nest' value into r11 and tail-jumps
/// to the nested function. The stub is written as five 32-bit words: three
/// fixed instruction words followed by the nest value and function pointer
/// that the stub's loads reference.
SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // The stub's layout (the three constant words below encode these
  // instructions; the last two words are the data they load):
  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  // Word 0: first pair of encoded instructions.
  OutChains[0] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr));

  // Word 1 at offset 4.
  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] =
      DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 4));

  // Word 2 at offset 8.
  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] =
      DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                   MachinePointerInfo(TrmpAddr, 8));

  // Word 3 at offset 12: the nest value.
  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] =
      DAG.getStore(Chain, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12));

  // Word 4 at offset 16: the nested function's address.
  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] =
      DAG.getStore(Chain, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 16));

  // The five stores are independent; join their chains.
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
|
|
|
|
|
2013-01-25 22:20:28 +01:00
|
|
|
/// Lower chainless XCore intrinsics. Currently only llvm.xcore.crc8 is
/// handled: it maps to the two-result CRC8 node, whose results are swapped
/// into the (crc, data) order the intrinsic defines. Other intrinsics are
/// left to default handling by returning an empty SDValue.
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Operand 0 carries the intrinsic ID; operands 1..3 are the arguments.
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
      DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    // CRC8's result 1 is the CRC; the intrinsic returns {crc, data}.
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}
|
|
|
|
|
2013-11-12 11:11:26 +01:00
|
|
|
SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  // A fence is lowered to the XCore memory-barrier pseudo node, threading
  // the incoming chain (operand 0) through it.
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Chain);
}
|
|
|
|
|
2014-02-11 11:36:18 +01:00
|
|
|
SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  // Unordered / monotonic atomic loads are lowered to ordinary (possibly
  // extending) loads; the required barriers are inserted elsewhere because
  // we requested fence insertion for atomics.
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  SDLoc DL(Op);
  EVT MemVT = N->getMemoryVT();
  if (MemVT == MVT::i32) {
    // A word access is only a single atomic access when naturally aligned.
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->getAlignment(), N->getMemOperand()->getFlags(),
                       N->getAAInfo(), N->getRanges());
  }
  if (MemVT == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  }
  // Byte accesses are always naturally aligned.
  if (MemVT == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->getAlignment(), N->getMemOperand()->getFlags(),
                          N->getAAInfo());
  // Wider types are not handled here.
  return SDValue();
}
|
|
|
|
|
|
|
|
SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  // Unordered / monotonic atomic stores are lowered to ordinary (possibly
  // truncating) stores; the required barriers are inserted elsewhere because
  // we requested fence insertion for atomics.
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert((N->getSuccessOrdering() == AtomicOrdering::Unordered ||
          N->getSuccessOrdering() == AtomicOrdering::Monotonic) &&
         "setInsertFencesForAtomic(true) expects unordered / monotonic");
  SDLoc DL(Op);
  EVT MemVT = N->getMemoryVT();
  if (MemVT == MVT::i32) {
    // A word access is only a single atomic access when naturally aligned.
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), DL, N->getVal(), N->getBasePtr(),
                        N->getPointerInfo(), N->getAlignment(),
                        N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  if (MemVT == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), DL, N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->getAlignment(),
                             N->getMemOperand()->getFlags(), N->getAAInfo());
  }
  // Byte accesses are always naturally aligned.
  if (MemVT == MVT::i8)
    return DAG.getTruncStore(N->getChain(), DL, N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->getAlignment(),
                             N->getMemOperand()->getFlags(), N->getAAInfo());
  // Wider types are not handled here.
  return SDValue();
}
|
|
|
|
|
Seperate volatility and atomicity/ordering in SelectionDAG
At the moment, we mark every atomic memory access as being also volatile. This is unnecessarily conservative and prohibits many legal transforms (DCE, folding, etc..).
This patch removes MOVolatile from the MachineMemOperands of atomic, but not volatile, instructions. This should be strictly NFC after a series of previous patches which have gone in to ensure backend code is conservative about handling of isAtomic MMOs. Once it's in and baked for a bit, we'll start working through removing unnecessary bailouts one by one. We applied this same strategy to the middle end a few years ago, with good success.
To make sure this patch itself is NFC, it is build on top of a series of other patches which adjust code to (for the moment) be as conservative for an atomic access as for a volatile access and build up a test corpus (mostly in test/CodeGen/X86/atomics-unordered.ll)..
Previously landed
D57593 Fix a bug in the definition of isUnordered on MachineMemOperand
D57596 [CodeGen] Be conservative about atomic accesses as for volatile
D57802 Be conservative about unordered accesses for the moment
rL353959: [Tests] First batch of cornercase tests for unordered atomics.
rL353966: [Tests] RMW folding tests w/unordered atomic operations.
rL353972: [Tests] More unordered atomic lowering tests.
rL353989: [SelectionDAG] Inline a single use helper function, and remove last non-MMO interface
rL354740: [Hexagon, SystemZ] Be super conservative about atomics
rL354800: [Lanai] Be super conservative about atomics
rL354845: [ARM] Be super conservative about atomics
Attention Out of Tree Backend Owners: This patch may break you. If it does, you can use the TLI getMMOFlags hook to restore the MOVolatile to any instruction you need to. (See llvm-dev thread titled "PSA: Changes to how atomics are handled in backends" started Feb 27, 2019.)
Differential Revision: https://reviews.llvm.org/D57601
llvm-svn: 355025
2019-02-27 21:20:08 +01:00
|
|
|
MachineMemOperand::Flags
XCoreTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  // Because of how we convert atomic_load and atomic_store to normal loads
  // and stores in the DAG, we need to ensure that the MMOs are marked
  // volatile since DAGCombine hasn't been updated to account for atomic,
  // but non volatile loads. (See D57601)
  if (const auto *SI = dyn_cast<StoreInst>(&I))
    return SI->isAtomic() ? MachineMemOperand::MOVolatile
                          : MachineMemOperand::MONone;
  if (const auto *LI = dyn_cast<LoadInst>(&I))
    return LI->isAtomic() ? MachineMemOperand::MOVolatile
                          : MachineMemOperand::MONone;
  // RMW and cmpxchg instructions are atomic by construction, so no
  // per-instruction ordering query is needed for them.
  if (isa<AtomicRMWInst>(&I) || isa<AtomicCmpXchgInst>(&I))
    return MachineMemOperand::MOVolatile;
  return MachineMemOperand::MONone;
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Calling Convention Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "XCoreGenCallingConv.inc"
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
// Call Calling Convention Implementation
|
2008-11-07 11:59:00 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
/// XCore call implementation
|
|
|
|
SDValue
|
2012-05-25 18:35:28 +02:00
|
|
|
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
|
2010-04-17 17:26:15 +02:00
|
|
|
SmallVectorImpl<SDValue> &InVals) const {
|
2012-05-25 18:35:28 +02:00
|
|
|
SelectionDAG &DAG = CLI.DAG;
|
2013-07-14 06:42:23 +02:00
|
|
|
SDLoc &dl = CLI.DL;
|
|
|
|
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
|
|
|
|
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
|
|
|
|
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
|
2012-05-25 18:35:28 +02:00
|
|
|
SDValue Chain = CLI.Chain;
|
|
|
|
SDValue Callee = CLI.Callee;
|
|
|
|
bool &isTailCall = CLI.IsTailCall;
|
|
|
|
CallingConv::ID CallConv = CLI.CallConv;
|
|
|
|
bool isVarArg = CLI.IsVarArg;
|
|
|
|
|
2010-01-27 01:07:07 +01:00
|
|
|
// XCore target does not yet support tail call optimization.
|
|
|
|
isTailCall = false;
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
// For now, only CallingConv::C implemented
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
switch (CallConv)
|
2008-11-07 11:59:00 +01:00
|
|
|
{
|
|
|
|
default:
|
2017-08-22 11:11:41 +02:00
|
|
|
report_fatal_error("Unsupported calling convention");
|
2008-11-07 11:59:00 +01:00
|
|
|
case CallingConv::Fast:
|
|
|
|
case CallingConv::C:
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
|
2010-07-07 17:54:55 +02:00
|
|
|
Outs, OutVals, Ins, dl, DAG, InVals);
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-27 18:47:48 +01:00
|
|
|
/// LowerCallResult - Lower the result values of a call into the
|
|
|
|
/// appropriate copies out of appropriate physical registers / memory locations.
|
2016-06-12 17:39:02 +02:00
|
|
|
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
|
|
|
|
const SmallVectorImpl<CCValAssign> &RVLocs,
|
|
|
|
const SDLoc &dl, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) {
|
2014-02-27 18:47:54 +01:00
|
|
|
SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
|
|
|
|
// Copy results out of physical registers.
|
|
|
|
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
|
|
|
|
const CCValAssign &VA = RVLocs[i];
|
|
|
|
if (VA.isRegLoc()) {
|
|
|
|
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
|
|
|
|
InFlag).getValue(1);
|
|
|
|
InFlag = Chain.getValue(2);
|
|
|
|
InVals.push_back(Chain.getValue(0));
|
|
|
|
} else {
|
|
|
|
assert(VA.isMemLoc());
|
|
|
|
ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
|
|
|
|
InVals.size()));
|
|
|
|
// Reserve space for this result.
|
|
|
|
InVals.push_back(SDValue());
|
|
|
|
}
|
2014-02-27 18:47:48 +01:00
|
|
|
}
|
|
|
|
|
2014-02-27 18:47:54 +01:00
|
|
|
// Copy results out of memory.
|
|
|
|
SmallVector<SDValue, 4> MemOpChains;
|
|
|
|
for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
|
|
|
|
int offset = ResultMemLocs[i].first;
|
|
|
|
unsigned index = ResultMemLocs[i].second;
|
|
|
|
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
|
2015-04-28 16:05:47 +02:00
|
|
|
SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
|
2014-04-26 20:35:24 +02:00
|
|
|
SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
|
2014-02-27 18:47:54 +01:00
|
|
|
InVals[index] = load;
|
|
|
|
MemOpChains.push_back(load.getValue(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Transform all loads nodes into one single node because
|
|
|
|
// all load nodes are independent of each other.
|
|
|
|
if (!MemOpChains.empty())
|
2014-04-26 20:35:24 +02:00
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
|
2014-02-27 18:47:54 +01:00
|
|
|
|
2014-02-27 18:47:48 +01:00
|
|
|
return Chain;
|
|
|
|
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue XCoreTargetLowering::LowerCCCCallTo(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, Align(4));

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  // Return locations are allocated after the argument area, hence the
  // pre-allocation of the argument stack size here.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      // STWSP takes a word offset, so divide the byte offset by 4.
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag in necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
// Formal Arguments Calling Convention Implementation
|
2008-11-07 11:59:00 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2013-08-01 10:18:55 +02:00
|
|
|
namespace {
  /// ArgDataPair - Associates a lowered formal argument's SDValue with its
  /// ISD argument flags (byval, sizes, alignment) so both survive the
  /// staged lowering in LowerCCCArguments, where byval handling must be
  /// deferred until after all CopyFromReg nodes have been emitted.
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
} // end anonymous namespace
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
/// XCore formal arguments implementation
|
2016-06-12 17:39:02 +02:00
|
|
|
/// Lower the incoming (formal) arguments for the XCore target.
/// XCore implements only the standard C-style calling conventions
/// (CallingConv::C and CallingConv::Fast); anything else is a fatal error.
/// All supported conventions share one implementation, LowerCCCArguments.
SDValue XCoreTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // Reject unsupported conventions up front.
  if (CallConv != CallingConv::C && CallConv != CallingConv::Fast)
    report_fatal_error("Unsupported calling convention");
  return LowerCCCArguments(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals);
}
|
|
|
|
|
|
|
|
/// LowerCCCArguments - transform physical registers into virtual registers
/// and generate load operations for arguments placed on the stack.
/// Incoming arguments described by \p Ins are lowered and appended to
/// \p InVals in argument order.
/// TODO: sret
SDValue XCoreTargetLowering::LowerCCCArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  // The first slot above the incoming arguments is occupied by the saved
  // link register.
  unsigned LRSaveSize = StackSlotSize;

  // Record the offset of the caller-reserved return-value area (used by
  // LowerReturn). Vararg functions cannot return values in memory, so the
  // offset is only meaningful for non-vararg functions.
  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers. Only i32 is supported here; any
      // other register type is a lowering bug.
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getEVTString() << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        // Remember the copy's chain result so stage 2 can token-factor it.
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      // Arguments larger than one stack slot are not handled; diagnose and
      // fall through.
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter,
      // placed just above the LR save area.
      int FI = MFI.CreateFixedObject(ObjSize,
                                     LRSaveSize + VA.getLocMemOffset(),
                                     true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI));
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI.CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          // The lowest-numbered unallocated register marks the start of the
          // vararg save area.
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        Register VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store =
            DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
        MemOps.push_back(Store);
      }
    } else {
      // All argument registers were used by fixed arguments.
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      Align Alignment =
          std::max(Align(StackSlotSize), ArgDI->Flags.getNonZeroByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI.CreateStackObject(Size, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(
          Chain, dl, FIN, ArgDI->SDV, DAG.getConstant(Size, dl, MVT::i32),
          Alignment, false, false, false, MachinePointerInfo(),
          MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Return Value Calling Convention Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2009-11-14 20:33:35 +01:00
|
|
|
bool XCoreTargetLowering::
|
2011-06-09 01:55:35 +02:00
|
|
|
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
|
2012-07-19 02:11:40 +02:00
|
|
|
bool isVarArg,
|
2010-07-10 11:00:22 +02:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
2010-07-07 00:19:37 +02:00
|
|
|
LLVMContext &Context) const {
|
2009-11-14 20:33:35 +01:00
|
|
|
SmallVector<CCValAssign, 16> RVLocs;
|
2014-08-06 20:45:26 +02:00
|
|
|
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
|
2014-02-27 18:47:54 +01:00
|
|
|
if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
|
|
|
|
return false;
|
|
|
|
if (CCInfo.getNextStackOffset() != 0 && isVarArg)
|
|
|
|
return false;
|
|
|
|
return true;
|
2009-11-14 20:33:35 +01:00
|
|
|
}
|
|
|
|
|
Major calling convention code refactoring.
Instead of awkwardly encoding calling-convention information with ISD::CALL,
ISD::FORMAL_ARGUMENTS, ISD::RET, and ISD::ARG_FLAGS nodes, TargetLowering
provides three virtual functions for targets to override:
LowerFormalArguments, LowerCall, and LowerRet, which replace the custom
lowering done on the special nodes. They provide the same information, but
in a more immediately usable format.
This also reworks much of the target-independent tail call logic. The
decision of whether or not to perform a tail call is now cleanly split
between target-independent portions, and the target dependent portion
in IsEligibleForTailCallOptimization.
This also synchronizes all in-tree targets, to help enable future
refactoring and feature work.
llvm-svn: 78142
2009-08-05 03:29:28 +02:00
|
|
|
/// Lower outgoing return values into an XCoreISD::RETSP node.
/// Values assigned to registers by RetCC_XCore are copied into their
/// physical registers (glued together); values assigned to memory are
/// stored into the caller-reserved return area recorded by
/// LowerCCCArguments via setReturnStackOffset.
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
    DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  // Reserve the memory return area first so register assignment starts
  // past it (not meaningful for vararg functions).
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI.CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Thread the glue value through each copy to guarantee that all
    // emitted copies are stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Other Lowering Code
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Expand the SELECT_CC pseudo-instruction into an explicit diamond of
/// machine basic blocks.
///
/// \param MI  The SELECT_CC pseudo to expand. Operands are:
///            0 = destination vreg, 1 = condition vreg,
///            2 = true value, 3 = false value.
/// \param BB  The block currently containing \p MI.
/// \return    The block in which subsequent instructions should be inserted
///            (the sink block of the diamond).
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  // SELECT_CC is the only pseudo this target marks usesCustomInserter.
  assert((MI.getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  // Insertion point for the new blocks: immediately after BB.
  MachineFunction::iterator It = ++BB->getIterator();

  // thisMBB:
  // ...
  // TrueVal = ...
  // cmpTY ccX, r1, r2
  // bCC copy1MBB
  // fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Branch to sinkMBB (the "true" arm) when the condition register is set;
  // otherwise fall through into copy0MBB, which supplies the false value.
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
      .addReg(MI.getOperand(1).getReg())
      .addMBB(sinkMBB);

  // copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl, TII.get(XCore::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(3).getReg())
      .addMBB(copy0MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(thisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
|
|
|
|
|
2009-07-16 14:50:48 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Target Optimization Hooks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Target-specific DAG combines for XCore.
///
/// Handles: narrowing of port/timer intrinsic operands whose high bits are
/// ignored by hardware, simplification of the long-arithmetic nodes
/// (LADD/LSUB/LMUL), formation of LMUL from add(add(mul,..),..) patterns,
/// and replacement of an unaligned load/store pair with a memmove.
/// Returns the replacement value, or an empty SDValue if no combine applied.
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    // Operand 0 is the chain, operand 1 the intrinsic ID; the value being
    // written is operand 3 for the intrinsics handled below.
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      // Only the low 8 bits are demanded, so the operand computation can be
      // narrowed when this node is the operand's sole user.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(OutVal, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      // SETPT uses only the low 16 bits of the port time.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        KnownBits Known;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLI.ShrinkDemandedConstant(Time, DemandedMask, TLO) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, Known, TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    // LADD produces (sum, carry); operands are (lhs, rhs, carry-in).
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    // (the carry-in can only contribute its low bit to the sum)
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    // LSUB produces (difference, borrow); operands are (lhs, rhs, borrow-in).
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      KnownBits Known = DAG.computeKnownBits(N2);
      if ((Known.Zero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    // LMUL produces (hi, lo) of x*y + a + b; operands are (x, y, a, b).
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      // (note LADD's results are (sum, carry) = (lo, hi), hence the swap)
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      // Result value 1 is the low 32 bits of the mul-accumulate.
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      // Extract the low 32 bits of each operand and form a single LMUL whose
      // (hi, lo) results are recombined into the i64 value.
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    // Bail out once legalization has started, when the access is already
    // sufficiently aligned for the target, or for volatile/indexed stores.
    if (!DCI.isBeforeLegalize() ||
        allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                       ST->getMemoryVT(),
                                       *ST->getMemOperand()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned Alignment = ST->getAlignment();

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      // The load's only use must be this store, the access shapes must match,
      // and no side-effecting node may sit between load and store on the
      // chain; then the pair is exactly a memmove of StoreBits/8 bytes.
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
        LD->getAlignment() == Alignment &&
        !LD->isVolatile() && !LD->isIndexed() &&
        Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(), LD->getBasePtr(),
                              DAG.getConstant(StoreBits / 8, dl, MVT::i32),
                              Align(Alignment), false, isTail,
                              ST->getPointerInfo(), LD->getPointerInfo());
      }
    }
    break;
  }
  }
  // No combine applied.
  return SDValue();
}
|
|
|
|
|
2014-05-14 23:14:37 +02:00
|
|
|
/// Report known-zero bits for XCore-specific nodes and intrinsics, so the
/// generic combiner can exploit the limited ranges of carry/borrow results
/// and of values read from ports and timers.
///
/// \param Op     The target node being queried.
/// \param Known  Out-parameter; reset here and filled with known bits.
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    // Result 1 of LADD/LSUB is the carry/borrow, which is 0 or 1.
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                         Known.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        // (timestamps are 16-bit values)
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        // (port token input yields an 8-bit value)
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        Known.Zero = APInt::getHighBitsSet(Known.getBitWidth(),
                                           Known.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}
|
|
|
|
|
2008-11-07 11:59:00 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Addressing mode description hooks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Return true if \p val fits an XCore "us" (unsigned short) immediate
/// field, i.e. lies in the range [0, 11].
static inline bool isImmUs(int64_t val)
{
  // A single unsigned comparison folds the >= 0 and <= 11 checks together.
  return static_cast<uint64_t>(val) <= 11;
}

/// Return true if \p val is a legal scaled-by-2 "us" immediate: an even
/// value whose half lies in [0, 11].
static inline bool isImmUs2(int64_t val)
{
  if (val % 2 != 0)
    return false;
  return isImmUs(val / 2);
}

/// Return true if \p val is a legal scaled-by-4 "us" immediate: a multiple
/// of 4 whose quarter lies in [0, 11].
static inline bool isImmUs4(int64_t val)
{
  if (val % 4 != 0)
    return false;
  return isImmUs(val / 4);
}
|
|
|
|
|
|
|
|
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
///
/// XCore load/store forms are: reg + scaled short immediate, or
/// reg + reg scaled by the access size. The immediate/scale limits
/// depend on the access size (1, 2/3, or 4+ bytes).
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // void is used for prefetch-like accesses with no element type; only a
  // base register plus a word-aligned short immediate is accepted.
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  // Global-relative addressing: word-sized (or larger) access, no base
  // register or scaling, and a word-aligned offset.
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
                 AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// XCore Inline Assembly Support
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2015-02-26 23:38:43 +01:00
|
|
|
std::pair<unsigned, const TargetRegisterClass *>
|
|
|
|
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
|
2015-07-05 21:29:18 +02:00
|
|
|
StringRef Constraint,
|
2015-02-26 23:38:43 +01:00
|
|
|
MVT VT) const {
|
2011-06-29 19:53:29 +02:00
|
|
|
if (Constraint.size() == 1) {
|
|
|
|
switch (Constraint[0]) {
|
2008-11-07 11:59:00 +01:00
|
|
|
default : break;
|
|
|
|
case 'r':
|
2012-04-20 09:30:17 +02:00
|
|
|
return std::make_pair(0U, &XCore::GRRegsRegClass);
|
2011-06-29 19:53:29 +02:00
|
|
|
}
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|
2011-06-29 19:53:29 +02:00
|
|
|
// Use the default implementation in TargetLowering to convert the register
|
|
|
|
// constraint into a member of a register class.
|
2015-02-26 23:38:43 +01:00
|
|
|
return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
|
2008-11-07 11:59:00 +01:00
|
|
|
}
|