
[AArch64][GlobalISel] clang-format AArch64LegalizerInfo.cpp. NFC.

Amara Emerson 2020-09-17 16:40:36 -07:00
parent e85d496ffd
commit c09ce4f3bc
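
Since this is a whitespace-only (NFC) change, it can be reproduced locally. A minimal sketch, assuming the file's in-tree path at the time and that clang-format picks up LLVM's checked-in .clang-format style:

    # Reformat the whole file in place using the nearest .clang-format config.
    clang-format -i llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp

    # Or format only the lines touched by pending (uncommitted) changes.
    git clang-format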


@@ -63,21 +63,21 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   }
 
   getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
-    .legalFor({p0, s1, s8, s16, s32, s64, v2s32, v4s32, v2s64, v16s8, v8s16})
-    .clampScalar(0, s1, s64)
-    .widenScalarToNextPow2(0, 8)
-    .fewerElementsIf(
-      [=](const LegalityQuery &Query) {
-        return Query.Types[0].isVector() &&
-               (Query.Types[0].getElementType() != s64 ||
-                Query.Types[0].getNumElements() != 2);
-      },
-      [=](const LegalityQuery &Query) {
-        LLT EltTy = Query.Types[0].getElementType();
-        if (EltTy == s64)
-          return std::make_pair(0, LLT::vector(2, 64));
-        return std::make_pair(0, EltTy);
-      });
+      .legalFor({p0, s1, s8, s16, s32, s64, v2s32, v4s32, v2s64, v16s8, v8s16})
+      .clampScalar(0, s1, s64)
+      .widenScalarToNextPow2(0, 8)
+      .fewerElementsIf(
+          [=](const LegalityQuery &Query) {
+            return Query.Types[0].isVector() &&
+                   (Query.Types[0].getElementType() != s64 ||
+                    Query.Types[0].getNumElements() != 2);
+          },
+          [=](const LegalityQuery &Query) {
+            LLT EltTy = Query.Types[0].getElementType();
+            if (EltTy == s64)
+              return std::make_pair(0, LLT::vector(2, 64));
+            return std::make_pair(0, EltTy);
+          });
 
   getActionDefinitionsBuilder(G_PHI)
       .legalFor({p0, s16, s32, s64, v2s32, v4s32, v2s64})
@@ -148,8 +148,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder({G_SREM, G_UREM})
       .lowerFor({s1, s8, s16, s32, s64});
 
-  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
-      .lowerFor({{s64, s1}});
+  getActionDefinitionsBuilder({G_SMULO, G_UMULO}).lowerFor({{s64, s1}});
 
   getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});
@@ -158,7 +157,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .minScalar(0, s32);
 
   getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
-    .legalFor({s32, s64, v2s64, v4s32, v2s32});
+      .legalFor({s32, s64, v2s64, v4s32, v2s32});
 
   getActionDefinitionsBuilder(G_FREM).libcallFor({s32, s64});
@@ -270,8 +269,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
                                  {v4s32, p0, 128, 8},
                                  {v2s64, p0, 128, 8}})
       // These extends are also legal
-      .legalForTypesWithMemDesc({{s32, p0, 8, 8},
-                                 {s32, p0, 16, 8}})
+      .legalForTypesWithMemDesc({{s32, p0, 8, 8}, {s32, p0, 16, 8}})
       .clampScalar(0, s8, s64)
       .lowerIfMemSizeNotPow2()
       // Lower any any-extending loads left into G_ANYEXT and G_LOAD
@@ -310,7 +308,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   // Constants
   getActionDefinitionsBuilder(G_CONSTANT)
-    .legalFor({p0, s8, s16, s32, s64})
+      .legalFor({p0, s8, s16, s32, s64})
       .clampScalar(0, s8, s64)
       .widenScalarToNextPow2(0);
   getActionDefinitionsBuilder(G_FCONSTANT)
@@ -386,13 +384,12 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder(G_TRUNC).alwaysLegal();
 
-  getActionDefinitionsBuilder(G_SEXT_INREG)
-      .legalFor({s32, s64})
-      .lower();
+  getActionDefinitionsBuilder(G_SEXT_INREG).legalFor({s32, s64}).lower();
 
   // FP conversions
-  getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
-      {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
+  getActionDefinitionsBuilder(G_FPTRUNC)
+      .legalFor(
+          {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
       .clampMaxNumElements(0, s32, 2);
   getActionDefinitionsBuilder(G_FPEXT).legalFor(
       {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}});
@@ -553,8 +550,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
           return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
         })
         // Any vectors left are the wrong size. Scalarize them.
-      .scalarize(0)
-      .scalarize(1);
+        .scalarize(0)
+        .scalarize(1);
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
@@ -593,7 +590,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
                        0, s8)
       .minScalarOrElt(0, s8); // Worst case, we need at least s8.
 
-    getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
+  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
       .legalIf([=](const LegalityQuery &Query) {
         const LLT &VecTy = Query.Types[0];
         // TODO: Support s8 and s16
@@ -618,8 +615,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       })
      .minScalarSameAs(1, 0);
 
-  getActionDefinitionsBuilder(G_CTLZ).legalForCartesianProduct(
-      {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
+  getActionDefinitionsBuilder(G_CTLZ)
+      .legalForCartesianProduct(
+          {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
       .scalarize(1);
 
   getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
@@ -647,8 +645,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   getActionDefinitionsBuilder(G_CONCAT_VECTORS)
       .legalFor({{v4s32, v2s32}, {v8s16, v4s16}});
 
-  getActionDefinitionsBuilder(G_JUMP_TABLE)
-      .legalFor({{p0}, {s64}});
+  getActionDefinitionsBuilder(G_JUMP_TABLE).legalFor({{p0}, {s64}});
 
   getActionDefinitionsBuilder(G_BRJT).legalIf([=](const LegalityQuery &Query) {
     return Query.Types[0] == p0 && Query.Types[1] == s64;
@@ -687,10 +684,9 @@ bool AArch64LegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
   llvm_unreachable("expected switch to return");
 }
 
-bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(MachineInstr &MI,
-                                                      MachineRegisterInfo &MRI,
-                                                      MachineIRBuilder &MIRBuilder,
-                                                      GISelChangeObserver &Observer) const {
+bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
+    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
+    GISelChangeObserver &Observer) const {
   assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
   // We do this custom legalization to convert G_GLOBAL_VALUE into target ADRP +
   // G_ADD_LOW instructions.
@@ -740,8 +736,8 @@ bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(MachineInstr &MI,
   return true;
 }
 
-bool AArch64LegalizerInfo::legalizeIntrinsic(
-    LegalizerHelper &Helper, MachineInstr &MI) const {
+bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
+                                             MachineInstr &MI) const {
   return true;
 }