//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "AVRISelLowering.h"
|
|
|
|
|
2017-01-08 00:39:47 +01:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2016-11-02 07:47:40 +01:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
|
|
|
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
|
|
|
|
|
|
|
#include "AVR.h"
|
|
|
|
#include "AVRMachineFunctionInfo.h"
|
2019-01-18 07:10:41 +01:00
|
|
|
#include "AVRSubtarget.h"
|
2016-11-02 07:47:40 +01:00
|
|
|
#include "AVRTargetMachine.h"
|
|
|
|
#include "MCTargetDesc/AVRMCTargetDesc.h"
|
|
|
|
|
|
|
|
namespace llvm {

AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
                                     const AVRSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);
  setSupportsUnalignedAtomics(true);

  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types
  // turn it back into a sub, since we don't have an add-with-immediate
  // instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);

  // Our shift instructions are only able to shift one bit at a time, so handle
  // this in a custom way.
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);

  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // Add support for postincrement and predecrement load/stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Atomic operations which must be lowered to rtlib calls.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }

  // Division/remainder
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);

  // Make division and modulus custom.
  setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);

  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

  // Expand 16-bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  // Expand multiplications to libcalls when there is
  // no hardware MUL.
  if (!Subtarget.supportsMultiplication()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }

  // Division rtlib functions (not supported), use divmod functions instead.
  setLibcallName(RTLIB::SDIV_I8, nullptr);
  setLibcallName(RTLIB::SDIV_I16, nullptr);
  setLibcallName(RTLIB::SDIV_I32, nullptr);
  setLibcallName(RTLIB::UDIV_I8, nullptr);
  setLibcallName(RTLIB::UDIV_I16, nullptr);
  setLibcallName(RTLIB::UDIV_I32, nullptr);

  // Modulus rtlib functions (not supported), use divmod functions instead.
  setLibcallName(RTLIB::SREM_I8, nullptr);
  setLibcallName(RTLIB::SREM_I16, nullptr);
  setLibcallName(RTLIB::SREM_I32, nullptr);
  setLibcallName(RTLIB::UREM_I8, nullptr);
  setLibcallName(RTLIB::UREM_I16, nullptr);
  setLibcallName(RTLIB::UREM_I32, nullptr);

  // Division and modulus rtlib functions.
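  // The names follow GCC's machine-mode convention, where the suffix encodes
  // the operand width: 'qi' is an 8-bit quarter-integer, 'hi' a 16-bit
  // half-integer and 'si' a 32-bit single-integer.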
  setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
  setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
  setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
  setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
  setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
  setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");

  // Several of the runtime library functions use a special calling convention.
  setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);

  // Trigonometric rtlib functions.
  setLibcallName(RTLIB::SIN_F32, "sin");
  setLibcallName(RTLIB::COS_F32, "cos");

  setMinFunctionAlignment(Align(2));
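
  // Requiring an impossibly large number of entries effectively disables the
  // generation of jump tables.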
  setMinimumJumpTableEntries(UINT_MAX);
}

const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE(name)                                                             \
  case AVRISD::name:                                                           \
    return #name

  switch (Opcode) {
  default:
    return nullptr;
    NODE(RET_FLAG);
    NODE(RETI_FLAG);
    NODE(CALL);
    NODE(WRAPPER);
    NODE(LSL);
    NODE(LSR);
    NODE(ROL);
    NODE(ROR);
    NODE(ASR);
    NODE(LSLLOOP);
    NODE(LSRLOOP);
    NODE(ROLLOOP);
    NODE(RORLOOP);
    NODE(ASRLOOP);
    NODE(BRCOND);
    NODE(CMP);
    NODE(CMPC);
    NODE(TST);
    NODE(SELECT_CC);
#undef NODE
  }
}

EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}

SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  // TODO: This function has to be completely rewritten to produce optimal
  // code; for now it's producing very long but correct code.
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);
  assert(isPowerOf2_32(VT.getSizeInBits()) &&
         "Expected power-of-2 shift amount");

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
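      // Reduce the rotate amount modulo the bit width, since rotating by the
      // full width is a no-op.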
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::ROTR: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }

  // Optimize int8/int16 shifts.
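  // AVR's SWAP instruction exchanges the two nibbles of a byte, so an 8-bit
  // shift by four is a SWAP plus a mask, and shifts by five or six need only
  // one or two extra single-bit shifts on top of that.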
  if (VT.getSizeInBits() == 8) {
    if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
      // Optimize LSL when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0xf0, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
               ShiftAmount < 7) {
      // Optimize LSR when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0x0f, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
      // Optimize LSL when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSL7, dl, VT, Victim);
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
      // Optimize LSR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSR7, dl, VT, Victim);
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
      // Optimize ASR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::ASR7, dl, VT, Victim);
      ShiftAmount = 0;
    }
  } else if (VT.getSizeInBits() == 16) {
    if (4 <= ShiftAmount && ShiftAmount < 8)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSL4, dl, VT, Victim);
        ShiftAmount -= 4;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSR4, dl, VT, Victim);
        ShiftAmount -= 4;
        break;
      default:
        break;
      }
    else if (8 <= ShiftAmount && ShiftAmount < 12)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSL8, dl, VT, Victim);
        ShiftAmount -= 8;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSR8, dl, VT, Victim);
        ShiftAmount -= 8;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASR8, dl, VT, Victim);
        ShiftAmount -= 8;
        break;
      default:
        break;
      }
    else if (12 <= ShiftAmount)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSL12, dl, VT, Victim);
        ShiftAmount -= 12;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSR12, dl, VT, Victim);
        ShiftAmount -= 12;
        break;
      default:
        break;
      }
  }
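
  // Each AVRISD shift node moves the value by exactly one bit, so unroll the
  // remaining amount into a chain of single-bit shifts.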
  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}

SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Node = Value;
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));
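
  // The divmod libcall computes both results in a single call, so model its
  // return value as a pair of integers of the operand type.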
  Type *RetTy = (Type *)StructType::get(Ty, Ty);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  return CallInfo.first;
}

SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

/// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}

/// Returns an appropriate CP/CPI/CPC node sequence for the given 8/16-bit
/// operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert(((LHS.getSimpleValueType() == MVT::i16) ||
          (LHS.getSimpleValueType() == MVT::i8)) &&
         "invalid comparison type");

  SDValue Cmp;

  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
    // Generate a CPI/CPC pair if RHS is a 16-bit constant.
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else {
    // Generate an ordinary comparison.
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
  }

  return Cmp;
}

/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs > C into lhs >= C+1, which allows us to fold the constant
        // into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs > C into lhs >= C+1, which allows us to fold the constant into
    // the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }

  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = getAVRCmp(LHS, RHS, DAG, DL);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}

SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
}

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
}

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // Vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  }

  return SDValue();
}

/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}

/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
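
    // Only a pre-decrement matching the access size (-1 for i8, -2 for i16)
    // can fold into the pointer registers' -X/-Y/-Z addressing mode.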
    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
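    // Only an increment matching the access size (1 for i8, 2 for i16) can
    // fold into the pointer registers' X+/Y+/Z+ post-increment addressing
    // mode.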
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}

bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}

//===----------------------------------------------------------------------===//
//  Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"

/// Registers for calling conventions, ordered in reverse as required by ABI.
/// Both arrays must be of the same length.
static const MCPhysReg RegList8[] = {
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};
static const MCPhysReg RegList16[] = {
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22,
    AVR::R22R21, AVR::R21R20, AVR::R20R19, AVR::R19R18,
    AVR::R18R17, AVR::R17R16, AVR::R16R15, AVR::R15R14,
    AVR::R14R13, AVR::R13R12, AVR::R12R11, AVR::R11R10,
    AVR::R10R9, AVR::R9R8};

static_assert(array_lengthof(RegList8) == array_lengthof(RegList16),
              "8-bit and 16-bit register arrays must be of equal length");

/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI.
/// In addition, all pieces of a certain argument have to be passed either
/// using registers or the stack but never mixing both.
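/// For example, a function taking a { i8, i16 } aggregate passes the i8 piece
/// in R22 and the i16 piece in the unaligned register pair R24R23.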
|
|
|
|
template <typename ArgT>
|
|
|
|
static void
|
|
|
|
analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F,
|
|
|
|
const DataLayout *TD, const SmallVectorImpl<ArgT> &Args,
|
|
|
|
SmallVectorImpl<CCValAssign> &ArgLocs, CCState &CCInfo) {
|
|
|
|
unsigned NumArgs = Args.size();
|
|
|
|
// This is the index of the last used register, in RegList*.
|
|
|
|
// -1 means R26 (R26 is never actually used in CC).
|
|
|
|
int RegLastIdx = -1;
|
|
|
|
// Once a value is passed to the stack it will always be used
|
|
|
|
bool UseStack = false;
|
|
|
|
for (unsigned i = 0; i != NumArgs;) {
|
|
|
|
MVT VT = Args[i].VT;
|
|
|
|
// We have to count the number of bytes for each function argument, that is
|
|
|
|
// those Args with the same OrigArgIndex. This is important in case the
|
|
|
|
// function takes an aggregate type.
|
|
|
|
// Current argument will be between [i..j).
|
|
|
|
unsigned ArgIndex = Args[i].OrigArgIndex;
|
|
|
|
unsigned TotalBytes = VT.getStoreSize();
|
|
|
|
unsigned j = i + 1;
|
|
|
|
for (; j != NumArgs; ++j) {
|
|
|
|
if (Args[j].OrigArgIndex != ArgIndex)
|
|
|
|
break;
|
|
|
|
TotalBytes += Args[j].VT.getStoreSize();
|
|
|
|
}
|
|
|
|
// Round up to even number of bytes.
|
|
|
|
TotalBytes = alignTo(TotalBytes, 2);
|
|
|
|
// Skip zero sized arguments
|
|
|
|
if (TotalBytes == 0)
|
|
|
|
continue;
|
|
|
|
// The index of the first register to be used
|
|
|
|
unsigned RegIdx = RegLastIdx + TotalBytes;
|
|
|
|
RegLastIdx = RegIdx;
|
|
|
|
// If there are not enough registers, use the stack
|
|
|
|
if (RegIdx >= array_lengthof(RegList8)) {
|
|
|
|
UseStack = true;
|
|
|
|
}
|
|
|
|
for (; i != j; ++i) {
|
|
|
|
MVT VT = Args[i].VT;
|
2016-11-02 07:47:40 +01:00
|
|
|
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|
if (UseStack) {
|
|
|
|
auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
|
|
|
|
unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
|
|
|
|
TD->getABITypeAlign(evt));
|
|
|
|
CCInfo.addLoc(
|
|
|
|
CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
|
|
|
|
} else {
|
|
|
|
unsigned Reg;
|
|
|
|
if (VT == MVT::i8) {
|
|
|
|
Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
|
|
|
|
} else if (VT == MVT::i16) {
|
|
|
|
Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
|
|
|
|
} else {
|
|
|
|
llvm_unreachable(
|
|
|
|
"calling convention can only manage i8 and i16 types");
|
|
|
|
}
|
|
|
|
assert(Reg && "register not available in calling convention");
|
|
|
|
CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
|
|
|
|
// Registers inside a particular argument are sorted in increasing order
|
|
|
|
// (remember the array is reversed).
|
|
|
|
RegIdx -= VT.getStoreSize();
|
|
|
|
}
|
2016-11-02 07:47:40 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|
/// Count the total number of bytes needed to pass or return these arguments.
|
|
|
|
template <typename ArgT>
|
|
|
|
static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
|
|
|
|
unsigned TotalBytes = 0;
|
2016-11-02 07:47:40 +01:00
|
|
|
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|
for (const ArgT& Arg : Args) {
|
|
|
|
TotalBytes += Arg.VT.getStoreSize();
|
2016-11-02 07:47:40 +01:00
|
|
|
}
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|
return TotalBytes;
|
2016-11-02 07:47:40 +01:00
|
|
|
}
|
|
|
|
|
[AVR] Rewrite the function calling convention.
Summary:
The previous version relied on the standard calling convention using
std::reverse() to try to force the AVR ABI. But this only works for
simple cases, it fails for example with aggregate types.
This patch rewrites the calling convention with custom C++ code, that
implements the ABI defined in https://gcc.gnu.org/wiki/avr-gcc.
To do that it adds a few 16-bit pseudo registers for unaligned argument
passing, such as R24R23. For example this function:
define void @fun({ i8, i16 } %a)
will pass %a.0 in R22 and %a.1 in R24R23.
There are no instructions that can use these pseudo registers, so a new
register class, DREGSMOVW, is defined to make them apart.
Also the ArgCC_AVR_BUILTIN_DIV is no longer necessary, as it is
identical to the C++ behavior (actually the clobber list is more strict
for __div* functions, but that is currently unimplemented).
Reviewers: dylanmckay
Subscribers: Gaelan, Sh4rK, indirect, jwagen, efriedma, dsprenkels, hiraditya, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D68524
Patch by Rodrigo Rivas Costa.
2020-06-19 13:26:00 +02:00
|
|
|

/// Analyze incoming and outgoing value of returning from a function.
/// The algorithm is similar to analyzeArguments, but there can only be
/// one value, possibly an aggregate, and it is limited to 8 bytes.
template <typename ArgT>
static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
                                CCState &CCInfo) {
  unsigned NumArgs = Args.size();
  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
  // CanLowerReturn() guarantees this assertion.
  assert(TotalBytes <= 8 &&
         "return values greater than 8 bytes cannot be lowered");

  // The GCC ABI says that the size is rounded up to the next even number,
  // but in practice anything larger than 4 bytes rounds up to 8.
  if (TotalBytes > 4) {
    TotalBytes = 8;
  } else {
    TotalBytes = alignTo(TotalBytes, 2);
  }
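
  // To illustrate the mapping (a sketch; the exact registers come from the
  // RegList8/RegList16 tables above): returning { i8, i16 } totals 3 bytes,
  // which rounds up to 4, so RegIdx starts at 3; the i8 then lands in R22
  // and the i16 in R24R23, mirroring the argument-passing example in the
  // avr-gcc ABI (https://gcc.gnu.org/wiki/avr-gcc).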

  // The index of the first register to use.
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    unsigned Reg;
    if (VT == MVT::i8) {
      Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
    } else if (VT == MVT::i16) {
      Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
    } else {
      llvm_unreachable("calling convention can only manage i8 and i16 types");
    }
    assert(Reg && "register not available in calling convention");
    CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));

    // Registers sort in increasing order.
    RegIdx -= VT.getStoreSize();
  }
}

SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Variadic functions do not need all the analysis below.
  if (isVarArg) {
    CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
  } else {
    analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo);
  }

  SDValue ArgValue;
  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // NOTE: Clang should not promote any i8 into i16, but for safety the
      // following code will handle zexts or sexts generated by other
      // front ends. Otherwise:
      // If this is an 8-bit value, it is really passed promoted
      // to 16 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      EVT LocVT = VA.getLocVT();

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(MF, FI)));
    }
  }
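
  // Note (an assumption documented here, not new behaviour): the frame index
  // recorded below is what this target's LowerVASTART expands llvm.va_start
  // into, i.e. the va_list ends up pointing at the first stack slot past the
  // named arguments.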
  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned StackSize = CCInfo.getNextStackOffset();
    AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();

  // AVR does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  const Function *F = nullptr;
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();

    F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }

  // Variadic functions do not need all the analysis below.
  if (isVarArg) {
    CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
  } else {
    analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo);
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // First, walk the register assignments, inserting copies.
  unsigned AI, AE;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];

    // Promote the value if needed. With Clang this should not happen.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
      break;
    }

    // Stop when we encounter a stack argument; we need to process them
    // in reverse order in the loop below.
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      break;
    }

    // Arguments that can be passed on registers must be kept in the RegsToPass
    // vector.
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  // Second, stack arguments have to be walked in reverse order by inserting
  // chained stores; this ensures their order is not changed by the scheduler
  // and that the push instruction sequence generated is correct. Otherwise
  // they could be freely intermixed.
  if (HasStackArgs) {
    for (AE = AI, AI = ArgLocs.size(); AI != AE; --AI) {
      unsigned Loc = AI - 1;
      CCValAssign &VA = ArgLocs[Loc];
      SDValue Arg = OutVals[Loc];

      assert(VA.isMemLoc());

      // SP points to one stack slot further down, so add one to adjust it.
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
          DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
          DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

      Chain =
          DAG.getStore(Chain, DL, Arg, PtrOff,
                       MachinePointerInfo::getStack(MF, VA.getLocMemOffset()));
    }
  }
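
  // Background for the "+1" above (AVR hardware convention, stated here as
  // an assumption rather than derived from this file): push stores a byte at
  // SP and then decrements it, so SP always points one slot below the last
  // pushed byte, and a value at call-frame offset N lives at SP + N + 1.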

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InFlag
  // is necessary since all emitted instructions must be stuck together.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);

  if (!Ins.empty()) {
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
}

/// Lower the result values of a call into the
/// appropriate copies out of the relevant physical registers.
///
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Handle runtime calling convs.
  if (CallConv == CallingConv::AVR_BUILTIN) {
    CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
  } else {
    analyzeReturnValues(Ins, CCInfo);
  }
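
  // A note on the getValue() indices in the loop below: with a glue input,
  // each CopyFromReg node produces (value, chain, glue), so getValue(0) is
  // the returned value, getValue(1) the updated chain, and getValue(2) the
  // glue that keeps these copies ordered right after the call.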
  // Copy all of the result registers out of their specified physreg.
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool AVRTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  if (CallConv == CallingConv::AVR_BUILTIN) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
    return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
  }

  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
  return TotalBytes <= 8;
}
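
// When this hook rejects a return value (more than 8 bytes), SelectionDAG
// falls back to generic sret demotion: the caller passes a hidden pointer
// and the callee returns through memory. That behaviour lives in common
// code, not in this file; it is noted here only for context.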

SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze return values.
  if (CallConv == CallingConv::AVR_BUILTIN) {
    CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
  } else {
    analyzeReturnValues(Outs, CCInfo);
  }

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
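
  // What follows selects the terminator: interrupt and signal handlers must
  // return with RETI, which also sets the I flag so interrupts are enabled
  // again on return, while ordinary functions return with RET. Naked
  // functions get neither and must supply their own return sequence.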
  // Don't emit the ret/reti instruction when the naked attribute is present
  // in the function being compiled.
  if (MF.getFunction().getAttributes().hasAttribute(
          AttributeList::FunctionIndex, Attribute::Naked)) {
    return Chain;
  }

  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

  unsigned RetOpc =
      AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_FLAG : AVRISD::RET_FLAG;

  RetOps[0] = Chain; // Update chain.

  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Custom Inserters
//===----------------------------------------------------------------------===//

MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  unsigned Opc;
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Invalid shift opcode!");
  case AVR::Lsl8:
    Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
    break;
  case AVR::Lsl16:
    Opc = AVR::LSLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Asr8:
    Opc = AVR::ASRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Asr16:
    Opc = AVR::ASRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Lsr8:
    Opc = AVR::LSRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsr16:
    Opc = AVR::LSRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Rol8:
    Opc = AVR::ROLBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Rol16:
    Opc = AVR::ROLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Ror8:
    Opc = AVR::RORBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Ror16:
    Opc = AVR::RORWRd;
    RC = &AVR::DREGSRegClass;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  MachineFunction::iterator I;
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I);
  if (I != F->end()) ++I;

  // Create loop block.
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, LoopBB);
  F->insert(I, CheckBB);
  F->insert(I, RemBB);

  // Update machine-CFG edges by transferring all successors of the current
  // block to the block containing instructions after shift.
  RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
                BB->end());
  RemBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add edges BB => CheckBB, LoopBB => CheckBB, CheckBB => LoopBB and
  // CheckBB => RemBB.
  BB->addSuccessor(CheckBB);
  LoopBB->addSuccessor(CheckBB);
  CheckBB->addSuccessor(LoopBB);
  CheckBB->addSuccessor(RemBB);

  Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftReg = RI.createVirtualRegister(RC);
  Register ShiftReg2 = RI.createVirtualRegister(RC);
  Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();

  // BB:
  // rjmp CheckBB
  BuildMI(BB, dl, TII.get(AVR::RJMPk)).addMBB(CheckBB);

  // LoopBB:
  // ShiftReg2 = shift ShiftReg
  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);

  // CheckBB:
  // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
  // DestReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt2 = ShiftAmt - 1;
  // if (ShiftAmt2 >= 0) goto LoopBB;
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
      .addReg(ShiftAmtSrcReg)
      .addMBB(BB)
      .addReg(ShiftAmtReg2)
      .addMBB(LoopBB);
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);

  BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2)
      .addReg(ShiftAmtReg);
  BuildMI(CheckBB, dl, TII.get(AVR::BRPLk)).addMBB(LoopBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return RemBB;
}
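
// For orientation, the expansion above yields roughly the following for an
// 8-bit logical shift left by a run-time amount (register numbers are
// illustrative, not what the register allocator will necessarily pick):
//
//           rjmp .Lcheck
//   .Lloop: add  r24, r24     ; one lsl step
//  .Lcheck: dec  r22          ; ShiftAmt2 = ShiftAmt - 1
//           brpl .Lloop       ; repeat while ShiftAmt2 >= 0
//           ; execution continues in RemBB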

static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
  if (I->getOpcode() == AVR::COPY) {
    Register SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
  }

  return false;
}

// The mul instructions wreak havoc on our zero_reg R1. We need to clear it
// after the result has been evacuated. This is probably not the best way to
// do it, but it works for now.
MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineBasicBlock::iterator I(MI);
  ++I; // in any case insert *after* the mul instruction
  if (isCopyMulResult(I))
    ++I;
  if (isCopyMulResult(I))
    ++I;
  BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
      .addReg(AVR::R1)
      .addReg(AVR::R1);
  return BB;
}
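
// The resulting pattern then looks roughly like this (registers are
// illustrative); the product lands in R1:R0, clobbering the zero register:
//   mul  r24, r22
//   mov  r24, r0    ; copies of the result, skipped over by the code above
//   eor  r1, r1     ; restore R1 to zero, as the rest of the code expects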

MachineBasicBlock *
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *MBB) const {
  int Opc = MI.getOpcode();

  // Pseudo shift instructions with a non constant shift amount are expanded
  // into a loop.
  switch (Opc) {
  case AVR::Lsl8:
  case AVR::Lsl16:
  case AVR::Lsr8:
  case AVR::Lsr16:
  case AVR::Rol8:
  case AVR::Rol16:
  case AVR::Ror8:
  case AVR::Ror16:
  case AVR::Asr8:
  case AVR::Asr16:
    return insertShift(MI, MBB);
  case AVR::MULRdRr:
  case AVR::MULSRdRr:
    return insertMul(MI, MBB);
  }

  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");

  const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
                                ->getParent()
                                ->getSubtarget()
                                .getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we insert the diamond
  // control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch
  // on, the true/false values to select between, and a branch opcode
  // to use.
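
  // Schematically, the CFG built below looks like this (a sketch, not
  // emitted text):
  //
  //   MBB:      brCC trueMBB        ; TII.getBrCond(CC)
  //             rjmp falseMBB
  //   falseMBB: rjmp trueMBB
  //   trueMBB:  dst = PHI [TrueVal, MBB], [FalseVal, falseMBB]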

  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineBasicBlock *FallThrough = MBB->getFallThrough();

  // If the current basic block falls through to another basic block,
  // we must insert an unconditional branch to the fallthrough destination
  // if we are to insert basic blocks at the prior fallthrough point.
  if (FallThrough != nullptr) {
    BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
  }

  MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator I;
  for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I);
  if (I != MF->end()) ++I;
  MF->insert(I, trueMBB);
  MF->insert(I, falseMBB);

  // Transfer remaining instructions and all successors of the current
  // block to the block which will contain the Phi node for the
  // select.
  trueMBB->splice(trueMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  trueMBB->transferSuccessorsAndUpdatePHIs(MBB);

  AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
  BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
  BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);

  // Unconditionally flow back to the true block.
  BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
  falseMBB->addSuccessor(trueMBB);

  // Set up the Phi node to determine where we came from.
  BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(falseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return trueMBB;
}

//===----------------------------------------------------------------------===//
//  Inline Asm Support
//===----------------------------------------------------------------------===//

AVRTargetLowering::ConstraintType
AVRTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
    switch (Constraint[0]) {
    default:
      break;
    case 'a': // Simple upper registers
    case 'b': // Base pointer registers pairs
    case 'd': // Upper register
    case 'l': // Lower registers
    case 'e': // Pointer register pairs
    case 'q': // Stack pointer register
    case 'r': // Any register
    case 'w': // Special upper register pairs
      return C_RegisterClass;
    case 't': // Temporary register
    case 'x': case 'X': // Pointer register pair X
    case 'y': case 'Y': // Pointer register pair Y
    case 'z': case 'Z': // Pointer register pair Z
      return C_Register;
    case 'Q': // A memory address based on Y or Z pointer with displacement.
      return C_Memory;
    case 'G': // Floating point constant
    case 'I': // 6-bit positive integer constant
    case 'J': // 6-bit negative integer constant
    case 'K': // Integer constant (Range: 2)
    case 'L': // Integer constant (Range: 0)
    case 'M': // 8-bit integer constant
    case 'N': // Integer constant (Range: -1)
    case 'O': // Integer constant (Range: 8, 16, 24)
    case 'P': // Integer constant (Range: 1)
    case 'R': // Integer constant (Range: -6 to 5)
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}
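
// For reference, a typical use of these constraints from C source would be
// (illustrative, not part of this file):
//   asm volatile("subi %0, %1" : "+d"(x) : "M"(4));
// where 'd' forces x into an upper register (r16..r31, the only ones SUBI
// accepts) and 'M' requires an 8-bit integer constant.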

unsigned
AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Not sure if this is actually the right thing to do, but we got to do
  // *something* [agnat]
  switch (ConstraintCode[0]) {
  case 'Q':
    return InlineAsm::Constraint_Q;
  }
  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

AVRTargetLowering::ConstraintWeight
AVRTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;

  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  // (this behaviour has been copied from the ARM backend)
  if (!CallOperandVal) {
    return CW_Default;
  }

  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'd':
  case 'r':
  case 'l':
    weight = CW_Register;
    break;
  case 'a':
  case 'b':
  case 'e':
  case 'q':
  case 't':
  case 'w':
  case 'x': case 'X':
  case 'y': case 'Y':
  case 'z': case 'Z':
    weight = CW_SpecificReg;
    break;
  case 'G':
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
      if (C->isZero()) {
        weight = CW_Constant;
      }
    }
    break;
  case 'I':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'J':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'K':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
        weight = CW_Constant;
      }
    }
    break;
  case 'L':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
        weight = CW_Constant;
      }
    }
    break;
  case 'M':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'N':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'O':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'P':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'R':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'Q':
    weight = CW_Memory;
    break;
  }

  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // We only support i8 and i16.
  //
  // FIXME: the following assert is disabled for now, since it sometimes gets
  // executed:
  // assert((VT == MVT::i16 || VT == MVT::i8) && "Wrong operand type.");

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Simple upper registers r16..r23.
      return std::make_pair(0U, &AVR::LD8loRegClass);
    case 'b': // Base pointer registers: y, z.
      return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
    case 'd': // Upper registers r16..r31.
      return std::make_pair(0U, &AVR::LD8RegClass);
    case 'l': // Lower registers r0..r15.
      return std::make_pair(0U, &AVR::GPR8loRegClass);
    case 'e': // Pointer register pairs: x, y, z.
      return std::make_pair(0U, &AVR::PTRREGSRegClass);
    case 'q': // Stack pointer register: SPH:SPL.
      return std::make_pair(0U, &AVR::GPRSPRegClass);
    case 'r': // Any register: r0..r31.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::GPR8RegClass);

      return std::make_pair(0U, &AVR::DREGSRegClass);
    case 't': // Temporary register: r0.
      return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass);
    case 'w': // Special upper register pairs: r24, r26, r28, r30.
      return std::make_pair(0U, &AVR::IWREGSRegClass);
    case 'x': // Pointer register pair X: r27:r26.
    case 'X':
      return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
    case 'y': // Pointer register pair Y: r29:r28.
    case 'Y':
      return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
    case 'z': // Pointer register pair Z: r31:r30.
    case 'Z':
      return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(
      Subtarget.getRegisterInfo(), Constraint, VT);
}

void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) {
    return;
  }

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;
  // Deal with integers first:
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
  case 'R': {
    const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C) {
      return;
    }

    int64_t CVal64 = C->getSExtValue();
    uint64_t CUVal64 = C->getZExtValue();
    switch (ConstraintLetter) {
    case 'I': // 0..63
      if (!isUInt<6>(CUVal64))
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'J': // -63..0
      if (CVal64 < -63 || CVal64 > 0)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'K': // 2
      if (CUVal64 != 2)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'L': // 0
      if (CUVal64 != 0)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'M': // 0..255
      if (!isUInt<8>(CUVal64))
        return;
      // i8 type may be printed as a negative number,
      // e.g. 254 would be printed as -2,
      // so we force it to i16 at least.
      if (Ty.getSimpleVT() == MVT::i8) {
        Ty = MVT::i16;
      }
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'N': // -1
      if (CVal64 != -1)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'O': // 8, 16, 24
      if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'P': // 1
      if (CUVal64 != 1)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'R': // -6..5
      if (CVal64 < -6 || CVal64 > 5)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    }

    break;
  }
  case 'G':
    const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
    if (!FC || !FC->isZero())
      return;
    // Soften float to i8 0.
    Result = DAG.getTargetConstant(0, DL, MVT::i8);
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                              const MachineFunction &MF) const {
  Register Reg;

  if (VT == LLT::scalar(8)) {
    Reg = StringSwitch<unsigned>(RegName)
              .Case("r0", AVR::R0).Case("r1", AVR::R1).Case("r2", AVR::R2)
              .Case("r3", AVR::R3).Case("r4", AVR::R4).Case("r5", AVR::R5)
              .Case("r6", AVR::R6).Case("r7", AVR::R7).Case("r8", AVR::R8)
              .Case("r9", AVR::R9).Case("r10", AVR::R10).Case("r11", AVR::R11)
              .Case("r12", AVR::R12).Case("r13", AVR::R13).Case("r14", AVR::R14)
              .Case("r15", AVR::R15).Case("r16", AVR::R16).Case("r17", AVR::R17)
              .Case("r18", AVR::R18).Case("r19", AVR::R19).Case("r20", AVR::R20)
              .Case("r21", AVR::R21).Case("r22", AVR::R22).Case("r23", AVR::R23)
              .Case("r24", AVR::R24).Case("r25", AVR::R25).Case("r26", AVR::R26)
              .Case("r27", AVR::R27).Case("r28", AVR::R28).Case("r29", AVR::R29)
              .Case("r30", AVR::R30).Case("r31", AVR::R31)
              .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
              .Default(0);
  } else {
    Reg = StringSwitch<unsigned>(RegName)
              .Case("r0", AVR::R1R0).Case("r2", AVR::R3R2)
              .Case("r4", AVR::R5R4).Case("r6", AVR::R7R6)
              .Case("r8", AVR::R9R8).Case("r10", AVR::R11R10)
              .Case("r12", AVR::R13R12).Case("r14", AVR::R15R14)
              .Case("r16", AVR::R17R16).Case("r18", AVR::R19R18)
              .Case("r20", AVR::R21R20).Case("r22", AVR::R23R22)
              .Case("r24", AVR::R25R24).Case("r26", AVR::R27R26)
              .Case("r28", AVR::R29R28).Case("r30", AVR::R31R30)
              .Case("X", AVR::R27R26).Case("Y", AVR::R29R28).Case("Z", AVR::R31R30)
              .Default(0);
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
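
// For context, this hook is what resolves the metadata string in the named
// register intrinsics, e.g. (illustrative IR, not from this file):
//   %v = call i8 @llvm.read_register.i8(metadata !{!"r1"})
// An unknown name falls through to the report_fatal_error above.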

} // end of namespace llvm