Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-22 02:33:06 +01:00)
[NFC] Fix a few whitespace issues and typos.
commit ba16635997
parent ecd2dc975e
@@ -31,7 +31,7 @@ enum class VFParamKind {
   OMP_LinearPos,      // declare simd linear(i:c) uniform(c)
   OMP_LinearValPos,   // declare simd linear(val(i:c)) uniform(c)
   OMP_LinearRefPos,   // declare simd linear(ref(i:c)) uniform(c)
-  OMP_LinearUValPos,  // declare simd linear(uval(i:c)) uniform(c
+  OMP_LinearUValPos,  // declare simd linear(uval(i:c)) uniform(c)
   OMP_Uniform,        // declare simd uniform(i)
   GlobalPredicate,    // Global logical predicate that acts on all lanes
                       // of the input and output mask concurrently. For
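Note: the enum entries above classify `declare simd` parameter kinds. A minimal sketch of the corresponding source-level clauses, for orientation only; the function name and parameters below are hypothetical and not part of the patch:

    // Hypothetical declaration: i is a reference whose underlying value
    // advances by the uniform step c on each lane (uval), and c itself is
    // uniform across all lanes.
    #pragma omp declare simd linear(uval(i) : c) uniform(c)
    void body(double &i, int c);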
@@ -1474,9 +1474,6 @@ public:
   ///
   /// If GroupNo is not NULL, it will receive the number of the operand group
   /// containing OpIdx.
   ///
   /// The flag operand is an immediate that can be decoded with methods like
   /// InlineAsm::hasRegClassConstraint().
   int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;

   /// Compute the static register class constraint for operand OpIdx.
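Note: a hedged sketch of how the documented API can be combined with InlineAsm::hasRegClassConstraint; the helper name and control flow below are illustrative and not from the patch:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/IR/InlineAsm.h"
    using namespace llvm;

    // Illustrative helper: MI is assumed to be an INLINEASM MachineInstr and
    // OpIdx one of its operand indices.
    static void inspectAsmOperand(const MachineInstr &MI, unsigned OpIdx) {
      unsigned GroupNo = 0;
      int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx, &GroupNo);
      if (FlagIdx < 0)
        return; // OpIdx does not belong to an inline asm operand group.
      unsigned Flag = MI.getOperand(FlagIdx).getImm();
      unsigned RC = 0;
      if (InlineAsm::hasRegClassConstraint(Flag, RC)) {
        // Operand group GroupNo is constrained to register class number RC.
      }
    }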
@@ -216,7 +216,7 @@ class LLVMVectorOfAnyPointersToElt<int num> : LLVMMatchType<num>;
 class LLVMVectorElementType<int num> : LLVMMatchType<num>;

 // Match the type of another intrinsic parameter that is expected to be a
-// vector type, but change the element count to be half as many
+// vector type, but change the element count to be half as many.
 class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;

 // Match the type of another intrinsic parameter that is expected to be a
@@ -707,7 +707,6 @@ def assertsext : SDNode<"ISD::AssertSext", SDT_assert>;
 def assertzext  : SDNode<"ISD::AssertZext", SDT_assert>;
 def assertalign : SDNode<"ISD::AssertAlign", SDT_assert>;

-
 //===----------------------------------------------------------------------===//
 // Selection DAG Condition Codes

@@ -646,6 +646,7 @@ bool RecurrenceDescriptor::hasMultipleUsesOf(

   return false;
 }

 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                           RecurrenceDescriptor &RedDes,
                                           DemandedBits *DB, AssumptionCache *AC,
@@ -903,7 +903,6 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
   return true;
 }

-
 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
   assert(isa<VectorType>(Mask->getType()) &&
          isa<IntegerType>(Mask->getType()->getScalarType()) &&
@@ -1236,6 +1236,7 @@ void DwarfDebug::beginModule(Module *M) {
       if (!GVMapEntry.size() || (Expr && Expr->isConstant()))
         GVMapEntry.push_back({nullptr, Expr});
     }
+
     DenseSet<DIGlobalVariable *> Processed;
     for (auto *GVE : CUNode->getGlobalVariables()) {
       DIGlobalVariable *GV = GVE->getVariable();
@@ -1553,6 +1554,7 @@ void DwarfDebug::collectVariableInfoFromMFTable(
     RegVar->initializeMMI(VI.Expr, VI.Slot);
     LLVM_DEBUG(dbgs() << "Created DbgVariable for " << VI.Var->getName()
                       << "\n");
+
     if (DbgVariable *DbgVar = MFVars.lookup(Var))
       DbgVar->addMMIEntry(*RegVar);
     else if (InfoHolder.addScopeVariable(Scope, RegVar.get())) {
@@ -12031,6 +12031,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
     AddToWorklist(ExtLoad.getNode());
     return SDValue(N, 0); // Return N so it doesn't get rechecked!
   }
+
   // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
   if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
       N0.hasOneUse() &&
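Note: the intuition behind the fold in the comment can be checked with plain integer arithmetic. A small self-contained sketch (i8 loaded value widened to i32, types chosen for illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int v = 0; v < 256; ++v) {
        uint8_t Mem = static_cast<uint8_t>(v);
        uint32_t ZextLoad = Mem;                           // (zextload x)
        int32_t SextInReg = static_cast<int8_t>(ZextLoad); // sext_inreg from bit 8
        int32_t SextLoad = static_cast<int8_t>(Mem);       // (sextload x)
        assert(SextInReg == SextLoad); // the two forms agree for every byte
      }
      return 0;
    }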
@@ -2854,6 +2854,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_TruncateHelper(SDNode *N) {
     HalfLo = DAG.getNode(N->getOpcode(), DL, HalfVT, InLoVec);
     HalfHi = DAG.getNode(N->getOpcode(), DL, HalfVT, InHiVec);
   }
+
   // Concatenate them to get the full intermediate truncation result.
   EVT InterVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements);
   SDValue InterVec = DAG.getNode(ISD::CONCAT_VECTORS, DL, InterVT, HalfLo,
@@ -5693,6 +5693,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       const APInt &Val = N1C->getAPIntValue();
       return SignExtendInReg(Val, VT);
     }
+
     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
       SmallVector<SDValue, 8> Ops;
       llvm::EVT OpVT = N1.getOperand(0).getValueType();
@@ -5830,7 +5831,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
     }
     break;
-  case ISD::EXTRACT_SUBVECTOR:
+  case ISD::EXTRACT_SUBVECTOR: {
     EVT N1VT = N1.getValueType();
     assert(VT.isVector() && N1VT.isVector() &&
            "Extract subvector VTs must be vectors!");
@@ -5873,6 +5874,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return N1.getOperand(1);
     break;
+  }
   }

   // Perform trivial constant folding.
   if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
@@ -76,7 +76,7 @@ namespace {
     // OrigAlignments - Alignments of stack objects before coloring.
     SmallVector<Align, 16> OrigAlignments;

-    // OrigSizes - Sizess of stack objects before coloring.
+    // OrigSizes - Sizes of stack objects before coloring.
     SmallVector<unsigned, 16> OrigSizes;

     // AllColors - If index is set, it's a spill slot, i.e. color.
@@ -442,6 +442,7 @@ Constant *Constant::getAggregateElement(unsigned Elt) const {
   if (const auto *CDS = dyn_cast<ConstantDataSequential>(this))
     return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt)
                                        : nullptr;
+
   return nullptr;
 }

@@ -2092,6 +2092,7 @@ void ShuffleVectorInst::setShuffleMask(ArrayRef<int> Mask) {
   ShuffleMask.assign(Mask.begin(), Mask.end());
   ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
 }
+
 Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                           Type *ResultTy) {
   Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
@@ -5158,7 +5158,6 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
           ExtType, DL, VA.getLocVT(), Chain, FIN,
           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
           MemVT);
-
     }

     if (VA.getLocInfo() == CCValAssign::Indirect) {
@@ -12528,6 +12527,7 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
   // e.g. 6=3*2=(2+1)*2.
   // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45
   // which equals to (1+2)*16-(1+2).
+
   // TrailingZeroes is used to test if the mul can be lowered to
   // shift+add+shift.
   unsigned TrailingZeroes = ConstValue.countTrailingZeros();
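Note: a quick arithmetic check of the decompositions named in the comments (6 = (2+1)*2 and 45 = (1+2)*16-(1+2)), written in the shift-and-add form the combine aims for; purely illustrative, not part of the patch:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int64_t x = 0; x <= 1000; ++x) {
        // 6 = (2+1)*2: shift+add, then one more shift.
        assert(x * 6 == (((x << 1) + x) << 1));
        // 45 = (1+2)*16 - (1+2): shift+add, shift, then subtract.
        int64_t x3 = x + (x << 1);
        assert(x * 45 == (x3 << 4) - x3);
      }
      return 0;
    }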
@@ -15952,7 +15952,6 @@ static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
 ///    [<Zn>.[S|D]{, #<imm>}]
 ///
 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
-
 inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
                                                   unsigned ScalarSizeInBytes) {
   // The immediate is not a multiple of the scalar size.
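Note: a hedged sketch of the rule the doc comment states (imm = sizeof(T) * k for k = 0..31), written as a standalone check. It mirrors the documented constraint, not necessarily the exact function body; the helper name is illustrative:

    #include <cassert>

    // True when OffsetInBytes can be encoded as #imm in [<Zn>.[S|D]{, #<imm>}],
    // i.e. it equals ScalarSizeInBytes * k with k in [0, 31].
    static bool isValidSVEVecImmOffset(unsigned OffsetInBytes,
                                       unsigned ScalarSizeInBytes) {
      if (OffsetInBytes % ScalarSizeInBytes != 0)
        return false; // not a multiple of the scalar size
      return OffsetInBytes / ScalarSizeInBytes <= 31;
    }

    int main() {
      assert(isValidSVEVecImmOffset(124, 4));  // 4 * 31 is encodable
      assert(!isValidSVEVecImmOffset(128, 4)); // k = 32 is out of range
      assert(!isValidSVEVecImmOffset(6, 4));   // not a multiple of 4
      return 0;
    }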
@@ -455,7 +455,7 @@ unsigned getBLRCallOpcode(const MachineFunction &MF);

 // struct TSFlags {
 #define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)        // 3-bits
-#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)  // 4-bit
+#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3)  // 4-bits
 #define TSFLAG_FALSE_LANE_TYPE(X)       ((X) << 7)  // 2-bits
 #define TSFLAG_INSTR_FLAGS(X)           ((X) << 9)  // 2-bits
 // }
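Note: the macros above pack four fields into a single TSFlags word (3 bits at position 0, 4 bits at 3, 2 bits at 7, 2 bits at 9). A hedged sketch of building and unpacking such a word; the helper and field values are illustrative, not taken from the target code:

    #include <cassert>
    #include <cstdint>

    // Packs the fields with the same shifts as the macros above.
    static uint64_t makeTSFlags(uint64_t ElemSize, uint64_t DestrType,
                                uint64_t FalseLanes, uint64_t InstrFlags) {
      return ElemSize | (DestrType << 3) | (FalseLanes << 7) | (InstrFlags << 9);
    }

    int main() {
      uint64_t Flags = makeTSFlags(/*ElemSize=*/5, /*DestrType=*/9,
                                   /*FalseLanes=*/2, /*InstrFlags=*/1);
      assert((Flags & 0x7) == 5);        // 3-bit element size type
      assert(((Flags >> 3) & 0xF) == 9); // 4-bit destructive instruction type
      assert(((Flags >> 7) & 0x3) == 2); // 2-bit false lane type
      assert(((Flags >> 9) & 0x3) == 1); // 2-bit instruction flags
      return 0;
    }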
@@ -969,7 +969,7 @@ let Predicates = [HasSVE] in {
   // st1h z0.d, p0, [x0, z0.d, uxtw]
   defm SST1B_D : sve_mem_64b_sst_sv_32_unscaled<0b000, "st1b", AArch64st1_scatter_sxtw, AArch64st1_scatter_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm SST1H_D : sve_mem_64b_sst_sv_32_unscaled<0b010, "st1h", AArch64st1_scatter_sxtw, AArch64st1_scatter_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
-  defm SST1W_D : sve_mem_64b_sst_sv_32_unscaled<0b100, "st1w", AArch64st1_scatter_sxtw, AArch64st1_scatter_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8,nxv2i32>;
+  defm SST1W_D : sve_mem_64b_sst_sv_32_unscaled<0b100, "st1w", AArch64st1_scatter_sxtw, AArch64st1_scatter_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm SST1D   : sve_mem_64b_sst_sv_32_unscaled<0b110, "st1d", AArch64st1_scatter_sxtw, AArch64st1_scatter_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i64>;

   // Scatters using packed, unscaled 32-bit offsets, e.g.
@@ -1798,12 +1798,10 @@ let Predicates = [HasSVE] in {
   // Add more complex addressing modes here as required
   multiclass pred_load<ValueType Ty, ValueType PredTy, SDPatternOperator Load,
                        Instruction RegRegInst, Instruction RegImmInst, ComplexPattern AddrCP> {
     // reg + reg
     let AddedComplexity = 1 in {
       def _reg_reg_z : Pat<(Ty (Load (AddrCP GPR64:$base, GPR64:$offset), (PredTy PPR:$gp), (SVEDup0Undef))),
                            (RegRegInst PPR:$gp, GPR64:$base, GPR64:$offset)>;
     }
     // reg + imm
     let AddedComplexity = 2 in {
       def _reg_imm_z : Pat<(Ty (Load (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), (PredTy PPR:$gp), (SVEDup0Undef))),
                            (RegImmInst PPR:$gp, GPR64:$base, simm4s1:$offset)>;
@@ -1845,12 +1843,10 @@ let Predicates = [HasSVE] in {

   multiclass pred_store<ValueType Ty, ValueType PredTy, SDPatternOperator Store,
                         Instruction RegRegInst, Instruction RegImmInst, ComplexPattern AddrCP> {
     // reg + reg
     let AddedComplexity = 1 in {
       def _reg_reg : Pat<(Store (Ty ZPR:$vec), (AddrCP GPR64:$base, GPR64:$offset), (PredTy PPR:$gp)),
                          (RegRegInst ZPR:$vec, PPR:$gp, GPR64:$base, GPR64:$offset)>;
     }
     // reg + imm
     let AddedComplexity = 2 in {
       def _reg_imm : Pat<(Store (Ty ZPR:$vec), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), (PredTy PPR:$gp)),
                          (RegImmInst ZPR:$vec, PPR:$gp, GPR64:$base, simm4s1:$offset)>;
@@ -365,6 +365,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
         return replaceInstUsesWith(EI, Idx);
       }
     }
+
     // InstSimplify should handle cases where the index is invalid.
     // For fixed-length vector, it's invalid to extract out-of-range element.
     if (!EC.isScalable() && IndexC->getValue().uge(NumElts))
@@ -400,6 +401,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
       }
     }
   }
+
   if (Instruction *I = foldBitcastExtElt(EI, Builder, DL.isBigEndian()))
     return I;

@@ -586,7 +586,7 @@ bool LoopVectorizationLegality::setupOuterLoopInductions() {

 /// Checks if a function is scalarizable according to the TLI, in
 /// the sense that it should be vectorized and then expanded in
-/// multiple scalarcalls. This is represented in the
+/// multiple scalar calls. This is represented in the
 /// TLI via mappings that do not specify a vector name, as in the
 /// following example:
 ///
@@ -885,6 +885,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
                                         "loop not vectorized: ", *LAR);
     });
   }
+
   if (!LAI->canVectorizeMemory())
     return false;

@@ -894,9 +895,9 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
         "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
     return false;
   }

   Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
   PSE.addPredicate(LAI->getPSE().getUnionPredicate());

   return true;
 }