//===- HexagonSubtarget.h - Define Subtarget for the Hexagon ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Hexagon specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
2014-08-13 18:26:38 +02:00
|
|
|
#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONSUBTARGET_H
|
|
|
|
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONSUBTARGET_H
|
2011-12-12 22:14:40 +01:00
|
|
|
|
2020-01-17 23:29:40 +01:00
|
|
|
#include "HexagonArch.h"
|
2014-06-27 02:27:40 +02:00
|
|
|
#include "HexagonFrameLowering.h"
|
2017-06-20 00:43:19 +02:00
|
|
|
#include "HexagonISelLowering.h"
|
2017-10-18 19:45:22 +02:00
|
|
|
#include "HexagonInstrInfo.h"
|
2017-09-15 17:46:05 +02:00
|
|
|
#include "HexagonRegisterInfo.h"
|
2014-06-27 02:27:40 +02:00
|
|
|
#include "HexagonSelectionDAGInfo.h"
|
2017-06-20 00:43:19 +02:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/CodeGen/ScheduleDAGMutation.h"
|
2017-11-17 02:07:10 +01:00
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2017-06-20 00:43:19 +02:00
|
|
|
#include "llvm/MC/MCInstrItineraries.h"
|
|
|
|
#include <memory>
|
2011-12-12 22:14:40 +01:00
|
|
|
#include <string>
|
2017-06-20 00:43:19 +02:00
|
|
|
#include <vector>
|
2011-12-12 22:14:40 +01:00
|
|
|
|
|
|
|
#define GET_SUBTARGETINFO_HEADER
|
|
|
|
#include "HexagonGenSubtargetInfo.inc"
|
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
|
2017-06-20 00:43:19 +02:00
|
|
|
class MachineInstr;
|
|
|
|
class SDep;
|
|
|
|
class SUnit;
|
|
|
|
class TargetMachine;
|
|
|
|
class Triple;
|
|
|
|
|
2011-12-12 22:14:40 +01:00
|
|
|
class HexagonSubtarget : public HexagonGenSubtargetInfo {
|
2013-11-19 01:57:56 +01:00
|
|
|
virtual void anchor();
|
2014-06-27 02:27:40 +02:00
|
|
|
|
2018-05-14 22:09:07 +02:00
|
|
|
bool UseHVX64BOps = false;
|
|
|
|
bool UseHVX128BOps = false;
|
|
|
|
|
2020-01-17 23:29:40 +01:00
|
|
|
bool UseAudioOps = false;
|
[Hexagon] Add a target feature to disable compound instructions
This affects the following instructions:
Tag: M4_mpyrr_addr Syntax: Ry32 = add(Ru32,mpyi(Ry32,Rs32))
Tag: M4_mpyri_addr_u2 Syntax: Rd32 = add(Ru32,mpyi(#u6:2,Rs32))
Tag: M4_mpyri_addr Syntax: Rd32 = add(Ru32,mpyi(Rs32,#u6))
Tag: M4_mpyri_addi Syntax: Rd32 = add(#u6,mpyi(Rs32,#U6))
Tag: M4_mpyrr_addi Syntax: Rd32 = add(#u6,mpyi(Rs32,Rt32))
Tag: S4_addaddi Syntax: Rd32 = add(Rs32,add(Ru32,#s6))
Tag: S4_subaddi Syntax: Rd32 = add(Rs32,sub(#s6,Ru32))
Tag: S4_or_andix Syntax: Rx32 = or(Ru32,and(Rx32,#s10))
Tag: S4_andi_asl_ri Syntax: Rx32 = and(#u8,asl(Rx32,#U5))
Tag: S4_ori_asl_ri Syntax: Rx32 = or(#u8,asl(Rx32,#U5))
Tag: S4_addi_asl_ri Syntax: Rx32 = add(#u8,asl(Rx32,#U5))
Tag: S4_subi_asl_ri Syntax: Rx32 = sub(#u8,asl(Rx32,#U5))
Tag: S4_andi_lsr_ri Syntax: Rx32 = and(#u8,lsr(Rx32,#U5))
Tag: S4_ori_lsr_ri Syntax: Rx32 = or(#u8,lsr(Rx32,#U5))
Tag: S4_addi_lsr_ri Syntax: Rx32 = add(#u8,lsr(Rx32,#U5))
Tag: S4_subi_lsr_ri Syntax: Rx32 = sub(#u8,lsr(Rx32,#U5))
2020-01-16 19:00:35 +01:00
|
|
|
bool UseCompound = false;
|
2018-05-14 22:09:07 +02:00
|
|
|
bool UseLongCalls = false;
|
|
|
|
bool UseMemops = false;
|
2018-03-12 18:47:46 +01:00
|
|
|
bool UsePackets = false;
|
|
|
|
bool UseNewValueJumps = false;
|
2018-05-14 22:41:04 +02:00
|
|
|
bool UseNewValueStores = false;
|
2018-05-14 23:01:56 +02:00
|
|
|
bool UseSmallData = false;
|
2020-01-17 23:29:40 +01:00
|
|
|
bool UseUnsafeMath = false;
|
2018-12-05 21:18:09 +01:00
|
|
|
bool UseZRegOps = false;
|
2011-12-12 22:14:40 +01:00
|
|
|
|
2020-01-17 16:17:38 +01:00
|
|
|
bool HasPreV65 = false;
|
2017-12-11 19:57:54 +01:00
|
|
|
bool HasMemNoShuf = false;
|
|
|
|
bool EnableDuplex = false;
|
2018-02-28 21:29:36 +01:00
|
|
|
bool ReservedR19 = false;
|
2018-11-09 19:16:24 +01:00
|
|
|
bool NoreturnStackElim = false;
|
2018-02-28 21:29:36 +01:00
|
|
|
|
2011-12-12 22:14:40 +01:00
|
|
|
public:
|
2017-10-18 19:45:22 +02:00
|
|
|
Hexagon::ArchEnum HexagonArchVersion;
|
2018-10-19 19:31:11 +02:00
|
|
|
Hexagon::ArchEnum HexagonHVXVersion = Hexagon::ArchEnum::NoArch;
|
2017-11-30 22:25:28 +01:00
|
|
|
CodeGenOpt::Level OptLevel;
|
2015-11-24 15:55:26 +01:00
|
|
|
/// True if the target should use Back-Skip-Back scheduling. This is the
|
|
|
|
/// default for V60.
|
|
|
|
bool UseBSBScheduling;
|
|
|
|
|
2017-08-28 18:24:22 +02:00
|
|
|
struct UsrOverflowMutation : public ScheduleDAGMutation {
|
2016-07-15 19:48:09 +02:00
|
|
|
void apply(ScheduleDAGInstrs *DAG) override;
|
|
|
|
};
|
2017-08-28 18:24:22 +02:00
|
|
|
struct HVXMemLatencyMutation : public ScheduleDAGMutation {
|
|
|
|
void apply(ScheduleDAGInstrs *DAG) override;
|
|
|
|
};
|
|
|
|
struct CallMutation : public ScheduleDAGMutation {
|
|
|
|
void apply(ScheduleDAGInstrs *DAG) override;
|
|
|
|
private:
|
|
|
|
bool shouldTFRICallBind(const HexagonInstrInfo &HII,
|
|
|
|
const SUnit &Inst1, const SUnit &Inst2) const;
|
|
|
|
};
|
2017-08-28 20:36:21 +02:00
|
|
|
struct BankConflictMutation : public ScheduleDAGMutation {
|
|
|
|
void apply(ScheduleDAGInstrs *DAG) override;
|
|
|
|
};
|
2016-07-15 19:48:09 +02:00
|
|
|
|
2014-06-27 02:27:40 +02:00
|
|
|
private:
|
2020-01-13 23:07:30 +01:00
|
|
|
enum HexagonProcFamilyEnum { Others, TinyCore };
|
|
|
|
|
2011-12-12 22:14:40 +01:00
|
|
|
std::string CPUString;
|
Add support for Linux/Musl ABI
Differential revision: https://reviews.llvm.org/D72701
The patch adds a new option ABI for Hexagon. It primary deals with
the way variable arguments are passed and is use in the Hexagon Linux Musl
environment.
If a callee function has a variable argument list, it must perform the
following operations to set up its function prologue:
1. Determine the number of registers which could have been used for passing
unnamed arguments. This can be calculated by counting the number of
registers used for passing named arguments. For example, if the callee
function is as follows:
int foo(int a, ...){ ... }
... then register R0 is used to access the argument ' a '. The registers
available for passing unnamed arguments are R1, R2, R3, R4, and R5.
2. Determine the number and size of the named arguments on the stack.
3. If the callee has named arguments on the stack, it should copy all of these
arguments to a location below the current position on the stack, and the
difference should be the size of the register-saved area plus padding
(if any is necessary).
The register-saved area constitutes all the registers that could have
been used to pass unnamed arguments. If the number of registers forming
the register-saved area is odd, it requires 4 bytes of padding; if the
number is even, no padding is required. This is done to ensure an 8-byte
alignment on the stack. For example, if the callee is as follows:
int foo(int a, ...){ ... }
... then the named arguments should be copied to the following location:
current_position - 5 (for R1-R5) * 4 (bytes) - 4 (bytes of padding)
If the callee is as follows:
int foo(int a, int b, ...){ ... }
... then the named arguments should be copied to the following location:
current_position - 4 (for R2-R5) * 4 (bytes) - 0 (bytes of padding)
4. After any named arguments have been copied, copy all the registers that
could have been used to pass unnamed arguments on the stack. If the number
of registers is odd, leave 4 bytes of padding and then start copying them
on the stack; if the number is even, no padding is required. This
constitutes the register-saved area. If padding is required, ensure
that the start location of padding is 8-byte aligned. If no padding is
required, ensure that the start location of the on-stack copy of the
first register which might have a variable argument is 8-byte aligned.
5. Decrement the stack pointer by the size of register saved area plus the
padding. For example, if the callee is as follows:
int foo(int a, ...){ ... } ;
... then the decrement value should be the following:
5 (for R1-R5) * 4 (bytes) + 4 (bytes of padding) = 24 bytes
The decrement should be performed before the allocframe instruction.
Increment the stack-pointer back by the same amount before returning
from the function.
2019-12-27 20:03:01 +01:00
|
|
|
Triple TargetTriple;
|
2020-01-13 23:07:30 +01:00
|
|
|
|
|
|
|
// The following objects can use the TargetTriple, so they must be
|
|
|
|
// declared after it.
|
|
|
|
HexagonProcFamilyEnum HexagonProcFamily = Others;
|
2014-06-27 02:27:40 +02:00
|
|
|
HexagonInstrInfo InstrInfo;
|
2017-09-15 17:46:05 +02:00
|
|
|
HexagonRegisterInfo RegInfo;
|
2014-06-27 02:27:40 +02:00
|
|
|
HexagonTargetLowering TLInfo;
|
|
|
|
HexagonSelectionDAGInfo TSInfo;
|
|
|
|
HexagonFrameLowering FrameLowering;
|
2011-12-12 22:14:40 +01:00
|
|
|
InstrItineraryData InstrItins;
|
2017-06-20 00:43:19 +02:00
|
|
|
|
2011-12-12 22:14:40 +01:00
|
|
|
public:
|
2015-06-10 14:11:26 +02:00
|
|
|
HexagonSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
|
2014-06-27 02:27:40 +02:00
|
|
|
const TargetMachine &TM);
|
2011-12-12 22:14:40 +01:00
|
|
|
|
Add support for Linux/Musl ABI
Differential revision: https://reviews.llvm.org/D72701
The patch adds a new option ABI for Hexagon. It primary deals with
the way variable arguments are passed and is use in the Hexagon Linux Musl
environment.
If a callee function has a variable argument list, it must perform the
following operations to set up its function prologue:
1. Determine the number of registers which could have been used for passing
unnamed arguments. This can be calculated by counting the number of
registers used for passing named arguments. For example, if the callee
function is as follows:
int foo(int a, ...){ ... }
... then register R0 is used to access the argument ' a '. The registers
available for passing unnamed arguments are R1, R2, R3, R4, and R5.
2. Determine the number and size of the named arguments on the stack.
3. If the callee has named arguments on the stack, it should copy all of these
arguments to a location below the current position on the stack, and the
difference should be the size of the register-saved area plus padding
(if any is necessary).
The register-saved area constitutes all the registers that could have
been used to pass unnamed arguments. If the number of registers forming
the register-saved area is odd, it requires 4 bytes of padding; if the
number is even, no padding is required. This is done to ensure an 8-byte
alignment on the stack. For example, if the callee is as follows:
int foo(int a, ...){ ... }
... then the named arguments should be copied to the following location:
current_position - 5 (for R1-R5) * 4 (bytes) - 4 (bytes of padding)
If the callee is as follows:
int foo(int a, int b, ...){ ... }
... then the named arguments should be copied to the following location:
current_position - 4 (for R2-R5) * 4 (bytes) - 0 (bytes of padding)
4. After any named arguments have been copied, copy all the registers that
could have been used to pass unnamed arguments on the stack. If the number
of registers is odd, leave 4 bytes of padding and then start copying them
on the stack; if the number is even, no padding is required. This
constitutes the register-saved area. If padding is required, ensure
that the start location of padding is 8-byte aligned. If no padding is
required, ensure that the start location of the on-stack copy of the
first register which might have a variable argument is 8-byte aligned.
5. Decrement the stack pointer by the size of register saved area plus the
padding. For example, if the callee is as follows:
int foo(int a, ...){ ... } ;
... then the decrement value should be the following:
5 (for R1-R5) * 4 (bytes) + 4 (bytes of padding) = 24 bytes
The decrement should be performed before the allocframe instruction.
Increment the stack-pointer back by the same amount before returning
from the function.
2019-12-27 20:03:01 +01:00
|
|
|
const Triple &getTargetTriple() const { return TargetTriple; }
|
|
|
|
bool isEnvironmentMusl() const {
|
|
|
|
return TargetTriple.getEnvironment() == Triple::Musl;
|
|
|
|
}
|
|
|
|
|
2014-08-16 00:17:28 +02:00
|
|
|
/// getInstrItins - Return the instruction itineraries based on subtarget
|
2011-12-12 22:14:40 +01:00
|
|
|
/// selection.
|
2014-09-03 13:41:21 +02:00
|
|
|
const InstrItineraryData *getInstrItineraryData() const override {
|
2014-08-04 23:25:23 +02:00
|
|
|
return &InstrItins;
|
|
|
|
}
|
|
|
|
const HexagonInstrInfo *getInstrInfo() const override { return &InstrInfo; }
|
2014-09-03 13:41:21 +02:00
|
|
|
const HexagonRegisterInfo *getRegisterInfo() const override {
|
2017-09-15 17:46:05 +02:00
|
|
|
return &RegInfo;
|
2014-06-27 02:27:40 +02:00
|
|
|
}
|
2014-09-03 13:41:21 +02:00
|
|
|
const HexagonTargetLowering *getTargetLowering() const override {
|
|
|
|
return &TLInfo;
|
|
|
|
}
|
|
|
|
const HexagonFrameLowering *getFrameLowering() const override {
|
2014-06-27 02:27:40 +02:00
|
|
|
return &FrameLowering;
|
|
|
|
}
|
2014-09-03 13:41:21 +02:00
|
|
|
const HexagonSelectionDAGInfo *getSelectionDAGInfo() const override {
|
|
|
|
return &TSInfo;
|
|
|
|
}
|
2011-12-12 22:14:40 +01:00
|
|
|
|
2014-06-27 02:27:40 +02:00
|
|
|
HexagonSubtarget &initializeSubtargetDependencies(StringRef CPU,
|
|
|
|
StringRef FS);
|
2011-12-12 22:14:40 +01:00
|
|
|
|
|
|
|
/// ParseSubtargetFeatures - Parses features string setting specified
|
|
|
|
/// subtarget options. Definition of function is auto generated by tblgen.
|
|
|
|
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
|
|
|
|
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV5Ops() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V5;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV5OpsOnly() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V5;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV55Ops() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V55;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV55OpsOnly() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V55;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV60Ops() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V60;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV60OpsOnly() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V60;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV62Ops() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V62;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV62OpsOnly() const {
|
2017-10-18 19:45:22 +02:00
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V62;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV65Ops() const {
|
2017-12-11 19:57:54 +01:00
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V65;
|
|
|
|
}
|
2018-06-20 15:56:09 +02:00
|
|
|
bool hasV65OpsOnly() const {
|
2017-12-11 19:57:54 +01:00
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V65;
|
|
|
|
}
|
2018-12-05 21:18:09 +01:00
|
|
|
bool hasV66Ops() const {
|
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V66;
|
|
|
|
}
|
|
|
|
bool hasV66OpsOnly() const {
|
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V66;
|
|
|
|
}
|
2020-01-17 23:29:40 +01:00
|
|
|
bool hasV67Ops() const {
|
|
|
|
return getHexagonArchVersion() >= Hexagon::ArchEnum::V67;
|
|
|
|
}
|
|
|
|
bool hasV67OpsOnly() const {
|
|
|
|
return getHexagonArchVersion() == Hexagon::ArchEnum::V67;
|
|
|
|
}
|
2018-05-14 23:01:56 +02:00
|
|
|
|
2020-01-17 23:29:40 +01:00
|
|
|
bool useAudioOps() const { return UseAudioOps; }
|
[Hexagon] Add a target feature to disable compound instructions
This affects the following instructions:
Tag: M4_mpyrr_addr Syntax: Ry32 = add(Ru32,mpyi(Ry32,Rs32))
Tag: M4_mpyri_addr_u2 Syntax: Rd32 = add(Ru32,mpyi(#u6:2,Rs32))
Tag: M4_mpyri_addr Syntax: Rd32 = add(Ru32,mpyi(Rs32,#u6))
Tag: M4_mpyri_addi Syntax: Rd32 = add(#u6,mpyi(Rs32,#U6))
Tag: M4_mpyrr_addi Syntax: Rd32 = add(#u6,mpyi(Rs32,Rt32))
Tag: S4_addaddi Syntax: Rd32 = add(Rs32,add(Ru32,#s6))
Tag: S4_subaddi Syntax: Rd32 = add(Rs32,sub(#s6,Ru32))
Tag: S4_or_andix Syntax: Rx32 = or(Ru32,and(Rx32,#s10))
Tag: S4_andi_asl_ri Syntax: Rx32 = and(#u8,asl(Rx32,#U5))
Tag: S4_ori_asl_ri Syntax: Rx32 = or(#u8,asl(Rx32,#U5))
Tag: S4_addi_asl_ri Syntax: Rx32 = add(#u8,asl(Rx32,#U5))
Tag: S4_subi_asl_ri Syntax: Rx32 = sub(#u8,asl(Rx32,#U5))
Tag: S4_andi_lsr_ri Syntax: Rx32 = and(#u8,lsr(Rx32,#U5))
Tag: S4_ori_lsr_ri Syntax: Rx32 = or(#u8,lsr(Rx32,#U5))
Tag: S4_addi_lsr_ri Syntax: Rx32 = add(#u8,lsr(Rx32,#U5))
Tag: S4_subi_lsr_ri Syntax: Rx32 = sub(#u8,lsr(Rx32,#U5))
2020-01-16 19:00:35 +01:00
|
|
|
bool useCompound() const { return UseCompound; }
|
2018-05-14 23:01:56 +02:00
|
|
|
bool useLongCalls() const { return UseLongCalls; }
|
2018-05-14 22:09:07 +02:00
|
|
|
bool useMemops() const { return UseMemops; }
|
|
|
|
bool usePackets() const { return UsePackets; }
|
|
|
|
bool useNewValueJumps() const { return UseNewValueJumps; }
|
2018-05-14 22:41:04 +02:00
|
|
|
bool useNewValueStores() const { return UseNewValueStores; }
|
2018-05-14 23:01:56 +02:00
|
|
|
bool useSmallData() const { return UseSmallData; }
|
2020-01-17 23:29:40 +01:00
|
|
|
bool useUnsafeMath() const { return UseUnsafeMath; }
|
2018-12-05 21:18:09 +01:00
|
|
|
bool useZRegOps() const { return UseZRegOps; }
|
2017-02-11 00:46:45 +01:00
|
|
|
|
2020-01-13 23:07:30 +01:00
|
|
|
bool isTinyCore() const { return HexagonProcFamily == TinyCore; }
|
|
|
|
bool isTinyCoreWithDuplex() const { return isTinyCore() && EnableDuplex; }
|
|
|
|
|
2018-10-19 19:31:11 +02:00
|
|
|
bool useHVXOps() const {
|
|
|
|
return HexagonHVXVersion > Hexagon::ArchEnum::NoArch;
|
|
|
|
}
|
2020-01-17 16:40:26 +01:00
|
|
|
bool useHVXV60Ops() const {
|
|
|
|
return HexagonHVXVersion >= Hexagon::ArchEnum::V60;
|
|
|
|
}
|
|
|
|
bool useHVXV62Ops() const {
|
|
|
|
return HexagonHVXVersion >= Hexagon::ArchEnum::V62;
|
|
|
|
}
|
|
|
|
bool useHVXV65Ops() const {
|
|
|
|
return HexagonHVXVersion >= Hexagon::ArchEnum::V65;
|
|
|
|
}
|
|
|
|
bool useHVXV66Ops() const {
|
|
|
|
return HexagonHVXVersion >= Hexagon::ArchEnum::V66;
|
|
|
|
}
|
2020-01-17 23:29:40 +01:00
|
|
|
bool useHVXV67Ops() const {
|
|
|
|
return HexagonHVXVersion >= Hexagon::ArchEnum::V67;
|
|
|
|
}
|
2017-10-18 20:07:07 +02:00
|
|
|
bool useHVX128BOps() const { return useHVXOps() && UseHVX128BOps; }
|
|
|
|
bool useHVX64BOps() const { return useHVXOps() && UseHVX64BOps; }
|
2018-03-12 18:47:46 +01:00
|
|
|
|
2017-12-11 19:57:54 +01:00
|
|
|
bool hasMemNoShuf() const { return HasMemNoShuf; }
|
2018-02-28 21:29:36 +01:00
|
|
|
bool hasReservedR19() const { return ReservedR19; }
|
2017-05-06 00:13:57 +02:00
|
|
|
bool usePredicatedCalls() const;
|
2015-11-24 15:55:26 +01:00
|
|
|
|
2018-11-09 19:16:24 +01:00
|
|
|
bool noreturnStackElim() const { return NoreturnStackElim; }
|
|
|
|
|
2015-11-24 15:55:26 +01:00
|
|
|
bool useBSBScheduling() const { return UseBSBScheduling; }
|
2015-03-11 23:56:10 +01:00
|
|
|
bool enableMachineScheduler() const override;
|
2017-06-20 00:43:19 +02:00
|
|
|
|
2015-03-11 23:56:10 +01:00
|
|
|
// Always use the TargetLowering default scheduler.
|
|
|
|
// FIXME: This will use the vliw scheduler which is probably just hurting
|
|
|
|
// compiler time and will be removed eventually anyway.
|
|
|
|
bool enableMachineSchedDefaultSched() const override { return false; }
|
2015-02-09 22:56:37 +01:00
|
|
|
|
2020-01-17 23:29:40 +01:00
|
|
|
// For use with PostRAScheduling: get the anti-dependence breaking that should
|
|
|
|
// be performed before post-RA scheduling.
|
2016-05-26 17:38:50 +02:00
|
|
|
AntiDepBreakMode getAntiDepBreakMode() const override { return ANTIDEP_ALL; }
|
2020-01-17 23:29:40 +01:00
|
|
|
/// True if the subtarget should run a scheduler after register
|
|
|
|
/// allocation.
|
2016-05-26 21:44:28 +02:00
|
|
|
bool enablePostRAScheduler() const override { return true; }
|
2016-05-26 17:38:50 +02:00
|
|
|
|
2016-05-28 04:02:51 +02:00
|
|
|
bool enableSubRegLiveness() const override;
|
|
|
|
|
2011-12-12 22:14:40 +01:00
|
|
|
const std::string &getCPUString () const { return CPUString; }
|
|
|
|
|
2017-10-18 19:45:22 +02:00
|
|
|
const Hexagon::ArchEnum &getHexagonArchVersion() const {
|
2015-12-14 16:03:54 +01:00
|
|
|
return HexagonArchVersion;
|
2011-12-12 22:14:40 +01:00
|
|
|
}
|
2016-07-15 19:48:09 +02:00
|
|
|
|
|
|
|
void getPostRAMutations(
|
|
|
|
std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations)
|
|
|
|
const override;
|
2016-07-15 23:34:02 +02:00
|
|
|
|
2016-12-22 20:44:55 +01:00
|
|
|
void getSMSMutations(
|
|
|
|
std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations)
|
|
|
|
const override;
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Enable use of alias analysis during code generation (during MI
|
2017-11-30 22:25:28 +01:00
|
|
|
/// scheduling, DAGCombine, etc.).
|
|
|
|
bool useAA() const override;
|
|
|
|
|
2018-05-01 17:54:18 +02:00
|
|
|
/// Perform target specific adjustments to the latency of a schedule
|
2016-07-15 23:34:02 +02:00
|
|
|
/// dependency.
|
2020-03-31 12:57:51 +02:00
|
|
|
void adjustSchedDependency(SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx,
|
|
|
|
SDep &Dep) const override;
|
2016-07-15 23:34:02 +02:00
|
|
|
|
2017-11-21 23:13:16 +01:00
|
|
|
unsigned getVectorLength() const {
|
|
|
|
assert(useHVXOps());
|
|
|
|
if (useHVX64BOps())
|
|
|
|
return 64;
|
|
|
|
if (useHVX128BOps())
|
|
|
|
return 128;
|
|
|
|
llvm_unreachable("Invalid HVX vector length settings");
|
|
|
|
}
|
|
|
|
|
2017-12-20 21:49:43 +01:00
|
|
|
ArrayRef<MVT> getHVXElementTypes() const {
|
|
|
|
static MVT Types[] = { MVT::i8, MVT::i16, MVT::i32 };
|
2017-12-21 01:28:34 +01:00
|
|
|
return makeArrayRef(Types);
|
2017-12-20 21:49:43 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool isHVXVectorType(MVT VecTy, bool IncludeBool = false) const {
|
2019-09-17 12:19:23 +02:00
|
|
|
if (!VecTy.isVector() || !useHVXOps() || VecTy.isScalableVector())
|
2017-11-27 19:12:16 +01:00
|
|
|
return false;
|
2017-12-20 21:49:43 +01:00
|
|
|
MVT ElemTy = VecTy.getVectorElementType();
|
|
|
|
if (!IncludeBool && ElemTy == MVT::i1)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned HwLen = getVectorLength();
|
|
|
|
unsigned NumElems = VecTy.getVectorNumElements();
|
|
|
|
ArrayRef<MVT> ElemTypes = getHVXElementTypes();
|
|
|
|
|
|
|
|
if (IncludeBool && ElemTy == MVT::i1) {
|
|
|
|
// Boolean HVX vector types are formed from regular HVX vector types
|
|
|
|
// by replacing the element type with i1.
|
|
|
|
for (MVT T : ElemTypes)
|
|
|
|
if (NumElems * T.getSizeInBits() == 8*HwLen)
|
|
|
|
return true;
|
2017-11-27 19:12:16 +01:00
|
|
|
return false;
|
2017-12-20 21:49:43 +01:00
|
|
|
}
|
|
|
|
|
2017-11-27 19:12:16 +01:00
|
|
|
unsigned VecWidth = VecTy.getSizeInBits();
|
2017-12-20 21:49:43 +01:00
|
|
|
if (VecWidth != 8*HwLen && VecWidth != 16*HwLen)
|
|
|
|
return false;
|
|
|
|
return llvm::any_of(ElemTypes, [ElemTy] (MVT T) { return ElemTy == T; });
|
2017-11-27 19:12:16 +01:00
|
|
|
}
|
|
|
|
|
2018-03-07 18:27:18 +01:00
|
|
|
unsigned getTypeAlignment(MVT Ty) const {
|
|
|
|
if (isHVXVectorType(Ty, true))
|
|
|
|
return getVectorLength();
|
|
|
|
return Ty.getSizeInBits() / 8;
|
|
|
|
}
|
|
|
|
|
2016-07-22 16:22:43 +02:00
|
|
|
unsigned getL1CacheLineSize() const;
|
|
|
|
unsigned getL1PrefetchDistance() const;
|
|
|
|
|
2016-07-15 23:34:02 +02:00
|
|
|
private:
|
|
|
|
// Helper function responsible for increasing the latency only.
|
2016-07-29 23:49:42 +02:00
|
|
|
void updateLatency(MachineInstr &SrcInst, MachineInstr &DstInst, SDep &Dep)
|
2016-07-18 16:23:10 +02:00
|
|
|
const;
|
2017-05-03 22:10:36 +02:00
|
|
|
void restoreLatency(SUnit *Src, SUnit *Dst) const;
|
|
|
|
void changeLatency(SUnit *Src, SUnit *Dst, unsigned Lat) const;
|
|
|
|
bool isBestZeroLatency(SUnit *Src, SUnit *Dst, const HexagonInstrInfo *TII,
|
|
|
|
SmallSet<SUnit*, 4> &ExclSrc, SmallSet<SUnit*, 4> &ExclDst) const;
|
2011-12-12 22:14:40 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
} // end namespace llvm
|
|
|
|
|
2017-06-20 00:43:19 +02:00
|
|
|
#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONSUBTARGET_H
|