
[SVE][CodeGen] Fix TypeSize/ElementCount related warnings in sve-split-load.ll

I have fixed up a number of warnings resulting from implicit TypeSize ->
uint64_t casts and from calling getVectorNumElements() on scalable vector
types. I think most of the changes are fairly trivial, except for those in
DAGTypeLegalizer::SplitVecRes_MLOAD, where I've tried to ensure we create
the MachineMemOperands in a sensible way for scalable vectors.
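
To illustrate the general pattern being fixed (a hedged sketch using the
existing EVT/ElementCount/TypeSize APIs, not code taken from this patch):

  // Warns, and would eventually assert, when VT is a scalable vector
  // such as <vscale x 4 x i32>:
  unsigned NumElts = VT.getVectorNumElements();

  // Scalable-safe: ElementCount carries the minimum element count plus a
  // flag saying whether it is scaled by vscale at runtime.
  ElementCount EC = VT.getVectorElementCount();

  // Similarly, keeping the result of getSizeInBits() as a TypeSize avoids
  // the implicit TypeSize -> uint64_t cast and forces an explicit choice
  // between getFixedSize() and getKnownMinSize() at each use.
  TypeSize Size = VT.getSizeInBits();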

I have added a CHECK line to the following test:

  CodeGen/AArch64/sve-split-load.ll

that ensures no new warnings are added.

Differential Revision: https://reviews.llvm.org/D86697
David Sherwood 2020-08-26 10:34:31 +01:00
parent c8b43e910b
commit c4d572ac0e
6 changed files with 40 additions and 21 deletions

llvm/include/llvm/Support/TypeSize.h

@@ -49,6 +49,13 @@ public:
     return { Min / RHS, Scalable };
   }
 
+  friend ElementCount operator-(const ElementCount &LHS,
+                                const ElementCount &RHS) {
+    assert(LHS.Scalable == RHS.Scalable &&
+           "Arithmetic using mixed scalable and fixed types");
+    return {LHS.Min - RHS.Min, LHS.Scalable};
+  }
+
   bool operator==(const ElementCount& RHS) const {
     return Min == RHS.Min && Scalable == RHS.Scalable;
   }
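
The new subtraction mirrors the existing ElementCount arithmetic: both
operands must agree on scalability. A hedged usage sketch (the variable
names are invented for the example):

  ElementCount VTNumElts = VT.getVectorElementCount();     // e.g. {9, fixed}
  ElementCount EnvNumElts = EnvVT.getVectorElementCount(); // e.g. {8, fixed}
  // {9, fixed} - {8, fixed} == {1, fixed}; subtracting a fixed count from
  // a scalable one (or vice versa) trips the assert.
  ElementCount Diff = VTNumElts - EnvNumElts;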

llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp

@@ -718,7 +718,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
     LLVM_DEBUG(dbgs() << "Legalizing extending load operation\n");
     EVT SrcVT = LD->getMemoryVT();
-    unsigned SrcWidth = SrcVT.getSizeInBits();
+    TypeSize SrcWidth = SrcVT.getSizeInBits();
     MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
     AAMDNodes AAInfo = LD->getAAInfo();
@@ -764,14 +764,15 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       Value = Result;
       Chain = Ch;
-    } else if (SrcWidth & (SrcWidth - 1)) {
+    } else if (!isPowerOf2_64(SrcWidth.getKnownMinSize())) {
       // If not loading a power-of-2 number of bits, expand as two loads.
       assert(!SrcVT.isVector() && "Unsupported extload!");
-      unsigned LogSrcWidth = Log2_32(SrcWidth);
+      unsigned SrcWidthBits = SrcWidth.getFixedSize();
+      unsigned LogSrcWidth = Log2_32(SrcWidthBits);
       assert(LogSrcWidth < 32);
       unsigned RoundWidth = 1 << LogSrcWidth;
-      assert(RoundWidth < SrcWidth);
-      unsigned ExtraWidth = SrcWidth - RoundWidth;
+      assert(RoundWidth < SrcWidthBits);
+      unsigned ExtraWidth = SrcWidthBits - RoundWidth;
       assert(ExtraWidth < RoundWidth);
       assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
              "Load size not an integral number of bytes!");

llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp

@@ -1638,9 +1638,10 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
   else
     std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
 
+  unsigned LoSize = MemoryLocation::getSizeOrUnknown(LoMemVT.getStoreSize());
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-      MLD->getPointerInfo(), MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
-      Alignment, MLD->getAAInfo(), MLD->getRanges());
+      MLD->getPointerInfo(), MachineMemOperand::MOLoad, LoSize, Alignment,
+      MLD->getAAInfo(), MLD->getRanges());
 
   Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
                          MMO, MLD->getAddressingMode(), ExtType,
@@ -1654,12 +1655,18 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
   // Generate hi masked load.
   Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, dl, LoMemVT, DAG,
                                    MLD->isExpandingLoad());
-  unsigned HiOffset = LoMemVT.getStoreSize();
 
+  unsigned HiSize = MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize());
+  MachinePointerInfo MPI;
+  if (LoMemVT.isScalableVector())
+    MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace());
+  else
+    MPI = MLD->getPointerInfo().getWithOffset(
+        LoMemVT.getStoreSize().getFixedSize());
+
   MMO = DAG.getMachineFunction().getMachineMemOperand(
-      MLD->getPointerInfo().getWithOffset(HiOffset),
-      MachineMemOperand::MOLoad, HiMemVT.getStoreSize(), Alignment,
-      MLD->getAAInfo(), MLD->getRanges());
+      MPI, MachineMemOperand::MOLoad, HiSize, Alignment, MLD->getAAInfo(),
+      MLD->getRanges());
 
   Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
                          HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
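
The MachinePointerInfo change is the subtle part: for a scalable vector
the lo half's store size is a multiple of vscale, so the hi half's byte
offset has no compile-time value and getWithOffset() cannot describe it.
A worked example of what the code above computes:

  // Splitting a masked load of <vscale x 16 x i8> into two
  // <vscale x 8 x i8> halves:
  //   LoMemVT.getStoreSize() == 8 * vscale bytes (known min size 8),
  // so the hi half starts 8 * vscale bytes in, which is not knowable
  // statically. Hence the fall-back to an address-space-only
  // MachinePointerInfo, together with
  //   MemoryLocation::getSizeOrUnknown(HiMemVT.getStoreSize())
  // which returns MemoryLocation::UnknownSize for scalable types.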

llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

@@ -6979,7 +6979,7 @@ SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
     assert(VT.isVector() == MemVT.isVector() &&
            "Cannot use an ext load to convert to or from a vector!");
     assert((!VT.isVector() ||
-            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
+            VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
            "Cannot use an ext load to change the number of vector elements!");
   }
@@ -9637,24 +9637,24 @@ std::pair<EVT, EVT>
 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                        bool *HiIsEmpty) const {
   EVT EltTp = VT.getVectorElementType();
-  bool IsScalable = VT.isScalableVector();
   // Examples:
   // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
   // custom VL=9 with enveloping VL=8/8 yields 8/1
   // custom VL=10 with enveloping VL=8/8 yields 8/2
   // etc.
-  unsigned VTNumElts = VT.getVectorNumElements();
-  unsigned EnvNumElts = EnvVT.getVectorNumElements();
+  ElementCount VTNumElts = VT.getVectorElementCount();
+  ElementCount EnvNumElts = EnvVT.getVectorElementCount();
+  assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
+         "Mixing fixed width and scalable vectors when enveloping a type");
   EVT LoVT, HiVT;
-  if (VTNumElts > EnvNumElts) {
+  if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
     LoVT = EnvVT;
-    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts,
-                            IsScalable);
+    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
     *HiIsEmpty = false;
   } else {
     // Flag that hi type has zero storage size, but return split envelop type
     // (this would be easier if vector types with zero elements were allowed).
-    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts, IsScalable);
+    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
     HiVT = EnvVT;
     *HiIsEmpty = true;
   }
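
Working through the comment's examples with the new ElementCount logic
(a sketch, not output from the compiler):

  // VT = v9i8, EnvVT = v8i8: 9 > 8, so LoVT = v8i8 and
  // HiVT = getVectorVT(i8, {9} - {8}) = v1i8; HiIsEmpty = false.
  // VT = <vscale x 2 x i64>, EnvVT = <vscale x 4 x i64>: 2 > 4 is false,
  // so LoVT = <vscale x 2 x i64>, HiVT = EnvVT and HiIsEmpty = true;
  // the hi type is returned only to satisfy the caller's split shape.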

llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

@@ -7221,7 +7221,7 @@ TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
   SDValue Increment;
   EVT AddrVT = Addr.getValueType();
   EVT MaskVT = Mask.getValueType();
-  assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() &&
+  assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
          "Incompatible types of Data and Mask");
   if (IsCompressedMemory) {
     if (DataVT.isScalableVector())

llvm/test/CodeGen/AArch64/sve-split-load.ll

@@ -1,5 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
 
 ; UNPREDICATED
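
For context on how the new RUN lines catch warnings: 2>%t redirects llc's
stderr into a temporary file, and the second FileCheck invocation scans
that file under the WARN prefix. --allow-empty is needed because a clean
run writes nothing to stderr, and WARN-NOT: warning then fails the test
if any warning text appears.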