Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 12:41:49 +01:00)
Replace "fallthrough" comments with LLVM_FALLTHROUGH
This is a mechanical change of fall-through comments in switches (spelled "fallthrough", "fall-through", or "fall-thru") to use the LLVM_FALLTHROUGH macro instead.
llvm-svn: 278902
parent d841f9b87f
commit b5f5b0ef6d
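For readers unfamiliar with the macro, the sketch below illustrates the pattern this commit applies. It is an illustrative example, not code taken from the patch; the function and variable names are hypothetical. LLVM_FALLTHROUGH comes from llvm/Support/Compiler.h and, on compilers that provide a fall-through attribute (for example [[clang::fallthrough]]), expands to that attribute; otherwise it expands to nothing, so the change affects only diagnostics such as -Wimplicit-fallthrough, not behavior.

    // Hypothetical example: an intentional switch fall-through, after this change.
    #include "llvm/Support/Compiler.h"

    static int classify(int Kind, bool Strict) {
      switch (Kind) {
      case 0:
        if (Strict)
          return -1;
        LLVM_FALLTHROUGH; // was a bare "// fall-through" comment before this commit
      case 1:
        return 1;
      default:
        return 0;
      }
    }

Unlike a comment, the macro is visible to the compiler, so an unintentional missing break can still be diagnosed while intentional fall-throughs stay silent.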
@ -2773,7 +2773,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||||||
Q.CxtI, Q.DT);
|
Q.CxtI, Q.DT);
|
||||||
if (!KnownNonNegative)
|
if (!KnownNonNegative)
|
||||||
break;
|
break;
|
||||||
// fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case ICmpInst::ICMP_EQ:
|
case ICmpInst::ICMP_EQ:
|
||||||
case ICmpInst::ICMP_UGT:
|
case ICmpInst::ICMP_UGT:
|
||||||
case ICmpInst::ICMP_UGE:
|
case ICmpInst::ICMP_UGE:
|
||||||
@ -2784,7 +2784,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||||||
Q.CxtI, Q.DT);
|
Q.CxtI, Q.DT);
|
||||||
if (!KnownNonNegative)
|
if (!KnownNonNegative)
|
||||||
break;
|
break;
|
||||||
// fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case ICmpInst::ICMP_NE:
|
case ICmpInst::ICMP_NE:
|
||||||
case ICmpInst::ICMP_ULT:
|
case ICmpInst::ICMP_ULT:
|
||||||
case ICmpInst::ICMP_ULE:
|
case ICmpInst::ICMP_ULE:
|
||||||
@ -2804,7 +2804,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||||||
Q.CxtI, Q.DT);
|
Q.CxtI, Q.DT);
|
||||||
if (!KnownNonNegative)
|
if (!KnownNonNegative)
|
||||||
break;
|
break;
|
||||||
// fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case ICmpInst::ICMP_NE:
|
case ICmpInst::ICMP_NE:
|
||||||
case ICmpInst::ICMP_UGT:
|
case ICmpInst::ICMP_UGT:
|
||||||
case ICmpInst::ICMP_UGE:
|
case ICmpInst::ICMP_UGE:
|
||||||
@ -2815,7 +2815,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||||||
Q.CxtI, Q.DT);
|
Q.CxtI, Q.DT);
|
||||||
if (!KnownNonNegative)
|
if (!KnownNonNegative)
|
||||||
break;
|
break;
|
||||||
// fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case ICmpInst::ICMP_EQ:
|
case ICmpInst::ICMP_EQ:
|
||||||
case ICmpInst::ICMP_ULT:
|
case ICmpInst::ICMP_ULT:
|
||||||
case ICmpInst::ICMP_ULE:
|
case ICmpInst::ICMP_ULE:
|
||||||
@ -2877,7 +2877,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
|||||||
case Instruction::LShr:
|
case Instruction::LShr:
|
||||||
if (ICmpInst::isSigned(Pred))
|
if (ICmpInst::isSigned(Pred))
|
||||||
break;
|
break;
|
||||||
// fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case Instruction::SDiv:
|
case Instruction::SDiv:
|
||||||
case Instruction::AShr:
|
case Instruction::AShr:
|
||||||
if (!LBO->isExact() || !RBO->isExact())
|
if (!LBO->isExact() || !RBO->isExact())
|
||||||
|
@ -537,7 +537,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
|
|||||||
--NumParams;
|
--NumParams;
|
||||||
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
||||||
return false;
|
return false;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case LibFunc::strcpy:
|
case LibFunc::strcpy:
|
||||||
case LibFunc::stpcpy:
|
case LibFunc::stpcpy:
|
||||||
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
|
return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
|
||||||
@ -549,7 +549,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
|
|||||||
--NumParams;
|
--NumParams;
|
||||||
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
||||||
return false;
|
return false;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case LibFunc::strncpy:
|
case LibFunc::strncpy:
|
||||||
case LibFunc::stpncpy:
|
case LibFunc::stpncpy:
|
||||||
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
|
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
|
||||||
@ -642,7 +642,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
|
|||||||
--NumParams;
|
--NumParams;
|
||||||
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
||||||
return false;
|
return false;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case LibFunc::memcpy:
|
case LibFunc::memcpy:
|
||||||
case LibFunc::mempcpy:
|
case LibFunc::mempcpy:
|
||||||
case LibFunc::memmove:
|
case LibFunc::memmove:
|
||||||
@ -655,7 +655,7 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
|
|||||||
--NumParams;
|
--NumParams;
|
||||||
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
if (!IsSizeTTy(FTy.getParamType(NumParams)))
|
||||||
return false;
|
return false;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case LibFunc::memset:
|
case LibFunc::memset:
|
||||||
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
|
return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
|
||||||
FTy.getParamType(0)->isPointerTy() &&
|
FTy.getParamType(0)->isPointerTy() &&
|
||||||
|
@ -2271,7 +2271,7 @@ std::error_code BitcodeReader::parseMetadata(bool ModuleLevel) {
|
|||||||
}
|
}
|
||||||
case bitc::METADATA_DISTINCT_NODE:
|
case bitc::METADATA_DISTINCT_NODE:
|
||||||
IsDistinct = true;
|
IsDistinct = true;
|
||||||
// fallthrough...
|
LLVM_FALLTHROUGH;
|
||||||
case bitc::METADATA_NODE: {
|
case bitc::METADATA_NODE: {
|
||||||
SmallVector<Metadata *, 8> Elts;
|
SmallVector<Metadata *, 8> Elts;
|
||||||
Elts.reserve(Record.size());
|
Elts.reserve(Record.size());
|
||||||
@ -3355,7 +3355,7 @@ std::error_code BitcodeReader::parseUseLists() {
|
|||||||
break;
|
break;
|
||||||
case bitc::USELIST_CODE_BB:
|
case bitc::USELIST_CODE_BB:
|
||||||
IsBB = true;
|
IsBB = true;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case bitc::USELIST_CODE_DEFAULT: {
|
case bitc::USELIST_CODE_DEFAULT: {
|
||||||
unsigned RecordLength = Record.size();
|
unsigned RecordLength = Record.size();
|
||||||
if (RecordLength < 3)
|
if (RecordLength < 3)
|
||||||
|
@ -1583,7 +1583,7 @@ bool MIParser::parseMachineOperand(MachineOperand &Dest,
|
|||||||
lex();
|
lex();
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
default:
|
default:
|
||||||
// FIXME: Parse the MCSymbol machine operand.
|
// FIXME: Parse the MCSymbol machine operand.
|
||||||
return error("expected a machine operand");
|
return error("expected a machine operand");
|
||||||
|
@ -145,7 +145,7 @@ ScoreboardHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
|
|||||||
case InstrStage::Required:
|
case InstrStage::Required:
|
||||||
// Required FUs conflict with both reserved and required ones
|
// Required FUs conflict with both reserved and required ones
|
||||||
freeUnits &= ~ReservedScoreboard[StageCycle];
|
freeUnits &= ~ReservedScoreboard[StageCycle];
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case InstrStage::Reserved:
|
case InstrStage::Reserved:
|
||||||
// Reserved FUs can conflict only with required ones.
|
// Reserved FUs can conflict only with required ones.
|
||||||
freeUnits &= ~RequiredScoreboard[StageCycle];
|
freeUnits &= ~RequiredScoreboard[StageCycle];
|
||||||
@ -197,7 +197,7 @@ void ScoreboardHazardRecognizer::EmitInstruction(SUnit *SU) {
|
|||||||
case InstrStage::Required:
|
case InstrStage::Required:
|
||||||
// Required FUs conflict with both reserved and required ones
|
// Required FUs conflict with both reserved and required ones
|
||||||
freeUnits &= ~ReservedScoreboard[cycle + i];
|
freeUnits &= ~ReservedScoreboard[cycle + i];
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case InstrStage::Reserved:
|
case InstrStage::Reserved:
|
||||||
// Reserved FUs can conflict only with required ones.
|
// Reserved FUs can conflict only with required ones.
|
||||||
freeUnits &= ~RequiredScoreboard[cycle + i];
|
freeUnits &= ~RequiredScoreboard[cycle + i];
|
||||||
|
@ -801,7 +801,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
|
|||||||
default: llvm_unreachable("This action is not supported yet!");
|
default: llvm_unreachable("This action is not supported yet!");
|
||||||
case TargetLowering::Custom:
|
case TargetLowering::Custom:
|
||||||
isCustom = true;
|
isCustom = true;
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case TargetLowering::Legal: {
|
case TargetLowering::Legal: {
|
||||||
Value = SDValue(Node, 0);
|
Value = SDValue(Node, 0);
|
||||||
Chain = SDValue(Node, 1);
|
Chain = SDValue(Node, 1);
|
||||||
@ -1598,6 +1598,7 @@ bool SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, SDValue &LHS,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// Fallthrough if we are unsigned integer.
|
// Fallthrough if we are unsigned integer.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETLE:
|
case ISD::SETLE:
|
||||||
case ISD::SETGT:
|
case ISD::SETGT:
|
||||||
case ISD::SETGE:
|
case ISD::SETGE:
|
||||||
|
@ -1776,7 +1776,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
|
|||||||
switch (BoolType) {
|
switch (BoolType) {
|
||||||
case TargetLoweringBase::UndefinedBooleanContent:
|
case TargetLoweringBase::UndefinedBooleanContent:
|
||||||
OVF = DAG.getNode(ISD::AND, dl, NVT, DAG.getConstant(1, dl, NVT), OVF);
|
OVF = DAG.getNode(ISD::AND, dl, NVT, DAG.getConstant(1, dl, NVT), OVF);
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case TargetLoweringBase::ZeroOrOneBooleanContent:
|
case TargetLoweringBase::ZeroOrOneBooleanContent:
|
||||||
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);
|
Hi = DAG.getNode(N->getOpcode(), dl, NVT, Hi, OVF);
|
||||||
break;
|
break;
|
||||||
|
@ -2481,7 +2481,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
|
|||||||
default:
|
default:
|
||||||
if (Op.getOpcode() < ISD::BUILTIN_OP_END)
|
if (Op.getOpcode() < ISD::BUILTIN_OP_END)
|
||||||
break;
|
break;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::INTRINSIC_WO_CHAIN:
|
case ISD::INTRINSIC_WO_CHAIN:
|
||||||
case ISD::INTRINSIC_W_CHAIN:
|
case ISD::INTRINSIC_W_CHAIN:
|
||||||
case ISD::INTRINSIC_VOID:
|
case ISD::INTRINSIC_VOID:
|
||||||
@ -3868,7 +3868,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
|
|||||||
// Handle undef ^ undef -> 0 special case. This is a common
|
// Handle undef ^ undef -> 0 special case. This is a common
|
||||||
// idiom (misuse).
|
// idiom (misuse).
|
||||||
return getConstant(0, DL, VT);
|
return getConstant(0, DL, VT);
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::ADD:
|
case ISD::ADD:
|
||||||
case ISD::ADDC:
|
case ISD::ADDC:
|
||||||
case ISD::ADDE:
|
case ISD::ADDE:
|
||||||
|
@ -483,7 +483,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
|
|||||||
// pad is shared by multiple invokes and is also a target of a normal
|
// pad is shared by multiple invokes and is also a target of a normal
|
||||||
// edge from elsewhere.
|
// edge from elsewhere.
|
||||||
addPass(createSjLjEHPreparePass());
|
addPass(createSjLjEHPreparePass());
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case ExceptionHandling::DwarfCFI:
|
case ExceptionHandling::DwarfCFI:
|
||||||
case ExceptionHandling::ARM:
|
case ExceptionHandling::ARM:
|
||||||
addPass(createDwarfEHPass(TM));
|
addPass(createDwarfEHPass(TM));
|
||||||
|
@ -97,7 +97,8 @@ public:
|
|||||||
(void)p;
|
(void)p;
|
||||||
assert((*p & 0x3B000000) == 0x39000000 &&
|
assert((*p & 0x3B000000) == 0x39000000 &&
|
||||||
"Only expected load / store instructions.");
|
"Only expected load / store instructions.");
|
||||||
} // fall-through
|
LLVM_FALLTHROUGH;
|
||||||
|
}
|
||||||
case MachO::ARM64_RELOC_PAGEOFF12: {
|
case MachO::ARM64_RELOC_PAGEOFF12: {
|
||||||
// Verify that the relocation points to one of the expected load / store
|
// Verify that the relocation points to one of the expected load / store
|
||||||
// or add / sub instructions.
|
// or add / sub instructions.
|
||||||
@ -196,7 +197,8 @@ public:
|
|||||||
assert((*p & 0x3B000000) == 0x39000000 &&
|
assert((*p & 0x3B000000) == 0x39000000 &&
|
||||||
"Only expected load / store instructions.");
|
"Only expected load / store instructions.");
|
||||||
(void)p;
|
(void)p;
|
||||||
} // fall-through
|
LLVM_FALLTHROUGH;
|
||||||
|
}
|
||||||
case MachO::ARM64_RELOC_PAGEOFF12: {
|
case MachO::ARM64_RELOC_PAGEOFF12: {
|
||||||
// Verify that the relocation points to one of the expected load / store
|
// Verify that the relocation points to one of the expected load / store
|
||||||
// or add / sub instructions.
|
// or add / sub instructions.
|
||||||
|
@ -925,7 +925,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
|
|||||||
// Handle undef ^ undef -> 0 special case. This is a common
|
// Handle undef ^ undef -> 0 special case. This is a common
|
||||||
// idiom (misuse).
|
// idiom (misuse).
|
||||||
return Constant::getNullValue(C1->getType());
|
return Constant::getNullValue(C1->getType());
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case Instruction::Add:
|
case Instruction::Add:
|
||||||
case Instruction::Sub:
|
case Instruction::Sub:
|
||||||
return UndefValue::get(C1->getType());
|
return UndefValue::get(C1->getType());
|
||||||
|
@ -265,7 +265,7 @@ bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
++NumIndirect;
|
++NumIndirect;
|
||||||
// FALLTHROUGH for Indirect Outputs.
|
LLVM_FALLTHROUGH; // We fall through for Indirect Outputs.
|
||||||
case InlineAsm::isInput:
|
case InlineAsm::isInput:
|
||||||
if (NumClobbers) return false; // inputs before clobbers.
|
if (NumClobbers) return false; // inputs before clobbers.
|
||||||
++NumInputs;
|
++NumInputs;
|
||||||
|
@ -449,7 +449,7 @@ static Value *stripPointerCastsAndOffsets(Value *V) {
|
|||||||
case PSK_InBoundsConstantIndices:
|
case PSK_InBoundsConstantIndices:
|
||||||
if (!GEP->hasAllConstantIndices())
|
if (!GEP->hasAllConstantIndices())
|
||||||
return V;
|
return V;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case PSK_InBounds:
|
case PSK_InBounds:
|
||||||
if (!GEP->isInBounds())
|
if (!GEP->isInBounds())
|
||||||
return V;
|
return V;
|
||||||
@ -848,7 +848,7 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
|
|||||||
// virtual (or inline) interface to handle this though, so instead we make
|
// virtual (or inline) interface to handle this though, so instead we make
|
||||||
// the TrackingVH accessors guarantee that a client never sees this value.
|
// the TrackingVH accessors guarantee that a client never sees this value.
|
||||||
|
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case Weak:
|
case Weak:
|
||||||
// Weak goes to the new value, which will unlink it from Old's list.
|
// Weak goes to the new value, which will unlink it from Old's list.
|
||||||
Entry->operator=(New);
|
Entry->operator=(New);
|
||||||
|
@ -36,7 +36,7 @@ Expected<std::unique_ptr<SymbolicFile>> SymbolicFile::createSymbolicFile(
|
|||||||
case sys::fs::file_magic::bitcode:
|
case sys::fs::file_magic::bitcode:
|
||||||
if (Context)
|
if (Context)
|
||||||
return errorOrToExpected(IRObjectFile::create(Object, *Context));
|
return errorOrToExpected(IRObjectFile::create(Object, *Context));
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case sys::fs::file_magic::unknown:
|
case sys::fs::file_magic::unknown:
|
||||||
case sys::fs::file_magic::archive:
|
case sys::fs::file_magic::archive:
|
||||||
case sys::fs::file_magic::macho_universal_binary:
|
case sys::fs::file_magic::macho_universal_binary:
|
||||||
|
@ -317,7 +317,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
|
|||||||
case Option::SeparateClass: case Option::JoinedOrSeparateClass:
|
case Option::SeparateClass: case Option::JoinedOrSeparateClass:
|
||||||
case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
|
case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
|
||||||
Name += ' ';
|
Name += ' ';
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case Option::JoinedClass: case Option::CommaJoinedClass:
|
case Option::JoinedClass: case Option::CommaJoinedClass:
|
||||||
case Option::JoinedAndSeparateClass:
|
case Option::JoinedAndSeparateClass:
|
||||||
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
|
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
|
||||||
|
@ -155,7 +155,7 @@ tgtok::TokKind TGLexer::LexToken() {
|
|||||||
case '0': case '1':
|
case '0': case '1':
|
||||||
if (NextChar == 'b')
|
if (NextChar == 'b')
|
||||||
return LexNumber();
|
return LexNumber();
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case '2': case '3': case '4': case '5':
|
case '2': case '3': case '4': case '5':
|
||||||
case '6': case '7': case '8': case '9':
|
case '6': case '7': case '8': case '9':
|
||||||
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
|
case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
|
||||||
|
@ -1286,6 +1286,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType,
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
// Fallthrough to try convert this to a bit.
|
// Fallthrough to try convert this to a bit.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
}
|
}
|
||||||
// All other values must be convertible to just a single bit.
|
// All other values must be convertible to just a single bit.
|
||||||
Init *Bit = Vals[i]->convertInitializerTo(BitRecTy::get());
|
Init *Bit = Vals[i]->convertInitializerTo(BitRecTy::get());
|
||||||
|
@ -1179,7 +1179,8 @@ static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
|
|||||||
changeFPCCToAArch64CC(CC, CondCode, CondCode2);
|
changeFPCCToAArch64CC(CC, CondCode, CondCode2);
|
||||||
break;
|
break;
|
||||||
case ISD::SETUO:
|
case ISD::SETUO:
|
||||||
Invert = true; // Fallthrough
|
Invert = true;
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETO:
|
case ISD::SETO:
|
||||||
CondCode = AArch64CC::MI;
|
CondCode = AArch64CC::MI;
|
||||||
CondCode2 = AArch64CC::GE;
|
CondCode2 = AArch64CC::GE;
|
||||||
@ -6720,8 +6721,8 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
|
|||||||
case AArch64CC::LT:
|
case AArch64CC::LT:
|
||||||
if (!NoNans)
|
if (!NoNans)
|
||||||
return SDValue();
|
return SDValue();
|
||||||
// If we ignore NaNs then we can use to the MI implementation.
|
// If we ignore NaNs then we can use to the MI implementation.
|
||||||
// Fallthrough.
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64CC::MI:
|
case AArch64CC::MI:
|
||||||
if (IsZero)
|
if (IsZero)
|
||||||
return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
|
return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
|
||||||
|
@ -375,7 +375,8 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
|
|||||||
// if NZCV is used, do not fold.
|
// if NZCV is used, do not fold.
|
||||||
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
|
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
|
||||||
return 0;
|
return 0;
|
||||||
// fall-through to ADDXri and ADDWri.
|
// fall-through to ADDXri and ADDWri.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::ADDXri:
|
case AArch64::ADDXri:
|
||||||
case AArch64::ADDWri:
|
case AArch64::ADDWri:
|
||||||
// add x, 1 -> csinc.
|
// add x, 1 -> csinc.
|
||||||
@ -402,7 +403,8 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
|
|||||||
// if NZCV is used, do not fold.
|
// if NZCV is used, do not fold.
|
||||||
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
|
if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
|
||||||
return 0;
|
return 0;
|
||||||
// fall-through to SUBXrr and SUBWrr.
|
// fall-through to SUBXrr and SUBWrr.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::SUBXrr:
|
case AArch64::SUBXrr:
|
||||||
case AArch64::SUBWrr: {
|
case AArch64::SUBWrr: {
|
||||||
// neg x -> csneg, represented as sub dst, xzr, src.
|
// neg x -> csneg, represented as sub dst, xzr, src.
|
||||||
|
@ -3455,7 +3455,7 @@ bool AArch64AsmParser::validateInstruction(MCInst &Inst,
|
|||||||
if (RI->isSubRegisterEq(Rn, Rt2))
|
if (RI->isSubRegisterEq(Rn, Rt2))
|
||||||
return Error(Loc[1], "unpredictable LDP instruction, writeback base "
|
return Error(Loc[1], "unpredictable LDP instruction, writeback base "
|
||||||
"is also a destination");
|
"is also a destination");
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
}
|
}
|
||||||
case AArch64::LDPDi:
|
case AArch64::LDPDi:
|
||||||
case AArch64::LDPQi:
|
case AArch64::LDPQi:
|
||||||
|
@ -1097,7 +1097,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
|
|||||||
case AArch64::STXRB:
|
case AArch64::STXRB:
|
||||||
case AArch64::STXRH:
|
case AArch64::STXRH:
|
||||||
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDARW:
|
case AArch64::LDARW:
|
||||||
case AArch64::LDARB:
|
case AArch64::LDARB:
|
||||||
case AArch64::LDARH:
|
case AArch64::LDARH:
|
||||||
@ -1121,7 +1121,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
|
|||||||
case AArch64::STLXRX:
|
case AArch64::STLXRX:
|
||||||
case AArch64::STXRX:
|
case AArch64::STXRX:
|
||||||
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDARX:
|
case AArch64::LDARX:
|
||||||
case AArch64::LDAXRX:
|
case AArch64::LDAXRX:
|
||||||
case AArch64::LDXRX:
|
case AArch64::LDXRX:
|
||||||
@ -1133,7 +1133,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
|
|||||||
case AArch64::STLXPW:
|
case AArch64::STLXPW:
|
||||||
case AArch64::STXPW:
|
case AArch64::STXPW:
|
||||||
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDAXPW:
|
case AArch64::LDAXPW:
|
||||||
case AArch64::LDXPW:
|
case AArch64::LDXPW:
|
||||||
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
|
DecodeGPR32RegisterClass(Inst, Rt, Addr, Decoder);
|
||||||
@ -1142,7 +1142,7 @@ static DecodeStatus DecodeExclusiveLdStInstruction(llvm::MCInst &Inst,
|
|||||||
case AArch64::STLXPX:
|
case AArch64::STLXPX:
|
||||||
case AArch64::STXPX:
|
case AArch64::STXPX:
|
||||||
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
DecodeGPR32RegisterClass(Inst, Rs, Addr, Decoder);
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDAXPX:
|
case AArch64::LDAXPX:
|
||||||
case AArch64::LDXPX:
|
case AArch64::LDXPX:
|
||||||
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
|
DecodeGPR64RegisterClass(Inst, Rt, Addr, Decoder);
|
||||||
@ -1218,7 +1218,7 @@ static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
|
|||||||
case AArch64::STPXpre:
|
case AArch64::STPXpre:
|
||||||
case AArch64::LDPSWpre:
|
case AArch64::LDPSWpre:
|
||||||
NeedsDisjointWritebackTransfer = true;
|
NeedsDisjointWritebackTransfer = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDNPXi:
|
case AArch64::LDNPXi:
|
||||||
case AArch64::STNPXi:
|
case AArch64::STNPXi:
|
||||||
case AArch64::LDPXi:
|
case AArch64::LDPXi:
|
||||||
@ -1232,7 +1232,7 @@ static DecodeStatus DecodePairLdStInstruction(llvm::MCInst &Inst, uint32_t insn,
|
|||||||
case AArch64::LDPWpre:
|
case AArch64::LDPWpre:
|
||||||
case AArch64::STPWpre:
|
case AArch64::STPWpre:
|
||||||
NeedsDisjointWritebackTransfer = true;
|
NeedsDisjointWritebackTransfer = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case AArch64::LDNPWi:
|
case AArch64::LDNPWi:
|
||||||
case AArch64::STNPWi:
|
case AArch64::STNPWi:
|
||||||
case AArch64::LDPWi:
|
case AArch64::LDPWi:
|
||||||
|
@ -586,9 +586,10 @@ void R600TargetLowering::ReplaceNodeResults(SDNode *N,
|
|||||||
Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
|
Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// Fall-through. Since we don't care about out of bounds values
|
// Since we don't care about out of bounds values we can use FP_TO_SINT for
|
||||||
// we can use FP_TO_SINT for uints too. The DAGLegalizer code for uint
|
// uints too. The DAGLegalizer code for uint considers some extra cases
|
||||||
// considers some extra cases which are not necessary here.
|
// which are not necessary here.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::FP_TO_SINT: {
|
case ISD::FP_TO_SINT: {
|
||||||
if (N->getValueType(0) == MVT::i1) {
|
if (N->getValueType(0) == MVT::i1) {
|
||||||
Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
|
Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
|
||||||
|
@ -2389,7 +2389,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
|
|||||||
// have the same legalization requires ments as global and private
|
// have the same legalization requires ments as global and private
|
||||||
// loads.
|
// loads.
|
||||||
//
|
//
|
||||||
// Fall-through
|
LLVM_FALLTHROUGH;
|
||||||
case AMDGPUAS::GLOBAL_ADDRESS:
|
case AMDGPUAS::GLOBAL_ADDRESS:
|
||||||
case AMDGPUAS::FLAT_ADDRESS:
|
case AMDGPUAS::FLAT_ADDRESS:
|
||||||
if (NumElements > 4)
|
if (NumElements > 4)
|
||||||
|
@ -1721,7 +1721,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
|
|||||||
ErrInfo = "Expected immediate, but got non-immediate";
|
ErrInfo = "Expected immediate, but got non-immediate";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// Fall-through
|
LLVM_FALLTHROUGH;
|
||||||
default:
|
default:
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -249,7 +249,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
|
|||||||
<< "]";
|
<< "]";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case 'c': // Don't print "#" before an immediate operand.
|
case 'c': // Don't print "#" before an immediate operand.
|
||||||
if (!MI->getOperand(OpNum).isImm())
|
if (!MI->getOperand(OpNum).isImm())
|
||||||
return true;
|
return true;
|
||||||
|
@ -684,7 +684,7 @@ initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
|
|||||||
case ARM::Bcc:
|
case ARM::Bcc:
|
||||||
isCond = true;
|
isCond = true;
|
||||||
UOpc = ARM::B;
|
UOpc = ARM::B;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::B:
|
case ARM::B:
|
||||||
Bits = 24;
|
Bits = 24;
|
||||||
Scale = 4;
|
Scale = 4;
|
||||||
|
@ -1075,7 +1075,8 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
|
|||||||
TII.get(Opc), Res)
|
TII.get(Opc), Res)
|
||||||
.addReg(SrcReg).addImm(1));
|
.addReg(SrcReg).addImm(1));
|
||||||
SrcReg = Res;
|
SrcReg = Res;
|
||||||
} // Fallthrough here.
|
LLVM_FALLTHROUGH;
|
||||||
|
}
|
||||||
case MVT::i8:
|
case MVT::i8:
|
||||||
if (isThumb2) {
|
if (isThumb2) {
|
||||||
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
|
if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
|
||||||
@ -1848,7 +1849,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
|
|||||||
// For AAPCS ABI targets, just use VFP variant of the calling convention.
|
// For AAPCS ABI targets, just use VFP variant of the calling convention.
|
||||||
return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
|
return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
|
||||||
}
|
}
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CallingConv::C:
|
case CallingConv::C:
|
||||||
case CallingConv::CXX_FAST_TLS:
|
case CallingConv::CXX_FAST_TLS:
|
||||||
// Use target triple & subtarget features to do actual dispatch.
|
// Use target triple & subtarget features to do actual dispatch.
|
||||||
|
@ -356,7 +356,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
|||||||
GPRCS2Size += 4;
|
GPRCS2Size += 4;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::R0:
|
case ARM::R0:
|
||||||
case ARM::R1:
|
case ARM::R1:
|
||||||
case ARM::R2:
|
case ARM::R2:
|
||||||
@ -559,7 +559,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
|
|||||||
case ARM::R12:
|
case ARM::R12:
|
||||||
if (STI.splitFramePushPop())
|
if (STI.splitFramePushPop())
|
||||||
break;
|
break;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::R0:
|
case ARM::R0:
|
||||||
case ARM::R1:
|
case ARM::R1:
|
||||||
case ARM::R2:
|
case ARM::R2:
|
||||||
@ -1558,7 +1558,7 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
|
|||||||
switch (Reg) {
|
switch (Reg) {
|
||||||
case ARM::LR:
|
case ARM::LR:
|
||||||
LRSpilled = true;
|
LRSpilled = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::R0: case ARM::R1:
|
case ARM::R0: case ARM::R1:
|
||||||
case ARM::R2: case ARM::R3:
|
case ARM::R2: case ARM::R3:
|
||||||
case ARM::R4: case ARM::R5:
|
case ARM::R4: case ARM::R5:
|
||||||
|
@ -4420,7 +4420,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
|
|||||||
case InlineAsm::Constraint_i:
|
case InlineAsm::Constraint_i:
|
||||||
// FIXME: It seems strange that 'i' is needed here since it's supposed to
|
// FIXME: It seems strange that 'i' is needed here since it's supposed to
|
||||||
// be an immediate and not a memory constraint.
|
// be an immediate and not a memory constraint.
|
||||||
// Fallthrough.
|
LLVM_FALLTHROUGH;
|
||||||
case InlineAsm::Constraint_m:
|
case InlineAsm::Constraint_m:
|
||||||
case InlineAsm::Constraint_o:
|
case InlineAsm::Constraint_o:
|
||||||
case InlineAsm::Constraint_Q:
|
case InlineAsm::Constraint_Q:
|
||||||
|
@ -4906,22 +4906,22 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
|
|||||||
switch (SetCCOpcode) {
|
switch (SetCCOpcode) {
|
||||||
default: llvm_unreachable("Illegal FP comparison");
|
default: llvm_unreachable("Illegal FP comparison");
|
||||||
case ISD::SETUNE:
|
case ISD::SETUNE:
|
||||||
case ISD::SETNE: Invert = true; // Fallthrough
|
case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETOEQ:
|
case ISD::SETOEQ:
|
||||||
case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
|
case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
|
||||||
case ISD::SETOLT:
|
case ISD::SETOLT:
|
||||||
case ISD::SETLT: Swap = true; // Fallthrough
|
case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETOGT:
|
case ISD::SETOGT:
|
||||||
case ISD::SETGT: Opc = ARMISD::VCGT; break;
|
case ISD::SETGT: Opc = ARMISD::VCGT; break;
|
||||||
case ISD::SETOLE:
|
case ISD::SETOLE:
|
||||||
case ISD::SETLE: Swap = true; // Fallthrough
|
case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETOGE:
|
case ISD::SETOGE:
|
||||||
case ISD::SETGE: Opc = ARMISD::VCGE; break;
|
case ISD::SETGE: Opc = ARMISD::VCGE; break;
|
||||||
case ISD::SETUGE: Swap = true; // Fallthrough
|
case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
|
case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
|
||||||
case ISD::SETUGT: Swap = true; // Fallthrough
|
case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
|
case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
|
||||||
case ISD::SETUEQ: Invert = true; // Fallthrough
|
case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
|
||||||
case ISD::SETONE:
|
case ISD::SETONE:
|
||||||
// Expand this to (OLT | OGT).
|
// Expand this to (OLT | OGT).
|
||||||
TmpOp0 = Op0;
|
TmpOp0 = Op0;
|
||||||
@ -4930,7 +4930,9 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
|
|||||||
Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
|
Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
|
||||||
Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
|
Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
|
||||||
break;
|
break;
|
||||||
case ISD::SETUO: Invert = true; // Fallthrough
|
case ISD::SETUO:
|
||||||
|
Invert = true;
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETO:
|
case ISD::SETO:
|
||||||
// Expand this to (OLT | OGE).
|
// Expand this to (OLT | OGE).
|
||||||
TmpOp0 = Op0;
|
TmpOp0 = Op0;
|
||||||
|
@ -5425,7 +5425,7 @@ bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// w/ a ':' after the '#', it's just like a plain ':'.
|
// w/ a ':' after the '#', it's just like a plain ':'.
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
}
|
}
|
||||||
case AsmToken::Colon: {
|
case AsmToken::Colon: {
|
||||||
S = Parser.getTok().getLoc();
|
S = Parser.getTok().getLoc();
|
||||||
|
@ -375,7 +375,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
|
|||||||
case ARM::fixup_arm_movt_hi16:
|
case ARM::fixup_arm_movt_hi16:
|
||||||
if (!IsPCRel)
|
if (!IsPCRel)
|
||||||
Value >>= 16;
|
Value >>= 16;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::fixup_arm_movw_lo16: {
|
case ARM::fixup_arm_movw_lo16: {
|
||||||
unsigned Hi4 = (Value & 0xF000) >> 12;
|
unsigned Hi4 = (Value & 0xF000) >> 12;
|
||||||
unsigned Lo12 = Value & 0x0FFF;
|
unsigned Lo12 = Value & 0x0FFF;
|
||||||
@ -387,7 +387,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
|
|||||||
case ARM::fixup_t2_movt_hi16:
|
case ARM::fixup_t2_movt_hi16:
|
||||||
if (!IsPCRel)
|
if (!IsPCRel)
|
||||||
Value >>= 16;
|
Value >>= 16;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::fixup_t2_movw_lo16: {
|
case ARM::fixup_t2_movw_lo16: {
|
||||||
unsigned Hi4 = (Value & 0xF000) >> 12;
|
unsigned Hi4 = (Value & 0xF000) >> 12;
|
||||||
unsigned i = (Value & 0x800) >> 11;
|
unsigned i = (Value & 0x800) >> 11;
|
||||||
@ -403,7 +403,7 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
|
|||||||
case ARM::fixup_arm_ldst_pcrel_12:
|
case ARM::fixup_arm_ldst_pcrel_12:
|
||||||
// ARM PC-relative values are offset by 8.
|
// ARM PC-relative values are offset by 8.
|
||||||
Value -= 4;
|
Value -= 4;
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::fixup_t2_ldst_pcrel_12: {
|
case ARM::fixup_t2_ldst_pcrel_12: {
|
||||||
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
|
// Offset by 4, adjusted by two due to the half-word ordering of thumb.
|
||||||
Value -= 4;
|
Value -= 4;
|
||||||
|
@ -1493,7 +1493,7 @@ getT2SORegOpValue(const MCInst &MI, unsigned OpIdx,
|
|||||||
case ARM_AM::lsl: SBits = 0x0; break;
|
case ARM_AM::lsl: SBits = 0x0; break;
|
||||||
case ARM_AM::lsr: SBits = 0x2; break;
|
case ARM_AM::lsr: SBits = 0x2; break;
|
||||||
case ARM_AM::asr: SBits = 0x4; break;
|
case ARM_AM::asr: SBits = 0x4; break;
|
||||||
case ARM_AM::rrx: // FALLTHROUGH
|
case ARM_AM::rrx: LLVM_FALLTHROUGH;
|
||||||
case ARM_AM::ror: SBits = 0x6; break;
|
case ARM_AM::ror: SBits = 0x6; break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -208,7 +208,7 @@ RecordARMScatteredHalfRelocation(MachObjectWriter *Writer,
|
|||||||
if (Asm.isThumbFunc(A))
|
if (Asm.isThumbFunc(A))
|
||||||
FixedValue &= 0xfffffffe;
|
FixedValue &= 0xfffffffe;
|
||||||
MovtBit = 1;
|
MovtBit = 1;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::fixup_t2_movw_lo16:
|
case ARM::fixup_t2_movw_lo16:
|
||||||
ThumbBit = 1;
|
ThumbBit = 1;
|
||||||
break;
|
break;
|
||||||
|
@ -154,7 +154,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
|
|||||||
GPRCS2Size += 4;
|
GPRCS2Size += 4;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ARM::R4:
|
case ARM::R4:
|
||||||
case ARM::R5:
|
case ARM::R5:
|
||||||
case ARM::R6:
|
case ARM::R6:
|
||||||
|
@ -651,7 +651,7 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
|
|||||||
case ARM::t2ADDSri: {
|
case ARM::t2ADDSri: {
|
||||||
if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
|
if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
|
||||||
return true;
|
return true;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
}
|
}
|
||||||
case ARM::t2ADDSrr:
|
case ARM::t2ADDSrr:
|
||||||
return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
|
return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
|
||||||
|
@ -172,7 +172,7 @@ void AVRRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
|
|||||||
Opcode = AVR::ADIWRdK;
|
Opcode = AVR::ADIWRdK;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
}
|
}
|
||||||
default: {
|
default: {
|
||||||
// This opcode will get expanded into a pair of subi/sbci.
|
// This opcode will get expanded into a pair of subi/sbci.
|
||||||
|
@ -569,8 +569,8 @@ public:
|
|||||||
if (!Resolved) {
|
if (!Resolved) {
|
||||||
switch ((unsigned)Fixup.getKind()) {
|
switch ((unsigned)Fixup.getKind()) {
|
||||||
case fixup_Hexagon_B22_PCREL:
|
case fixup_Hexagon_B22_PCREL:
|
||||||
// GetFixupCount assumes B22 won't relax
|
// GetFixupCount assumes B22 won't relax
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
break;
|
break;
|
||||||
|
@ -215,7 +215,7 @@ bool HexagonShuffler::check() {
|
|||||||
break;
|
break;
|
||||||
case HexagonII::TypeJR:
|
case HexagonII::TypeJR:
|
||||||
++jumpr;
|
++jumpr;
|
||||||
// Fall-through.
|
LLVM_FALLTHROUGH;
|
||||||
case HexagonII::TypeJ:
|
case HexagonII::TypeJ:
|
||||||
++jumps;
|
++jumps;
|
||||||
break;
|
break;
|
||||||
|
@ -807,7 +807,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
|
|||||||
std::swap(LHS, RHS);
|
std::swap(LHS, RHS);
|
||||||
break;
|
break;
|
||||||
case ISD::SETULE:
|
case ISD::SETULE:
|
||||||
std::swap(LHS, RHS); // FALLTHROUGH
|
std::swap(LHS, RHS);
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETUGE:
|
case ISD::SETUGE:
|
||||||
// Turn lhs u>= rhs with lhs constant into rhs u< lhs+1, this allows us to
|
// Turn lhs u>= rhs with lhs constant into rhs u< lhs+1, this allows us to
|
||||||
// fold constant into instruction.
|
// fold constant into instruction.
|
||||||
@ -820,7 +821,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
|
|||||||
TCC = MSP430CC::COND_HS; // aka COND_C
|
TCC = MSP430CC::COND_HS; // aka COND_C
|
||||||
break;
|
break;
|
||||||
case ISD::SETUGT:
|
case ISD::SETUGT:
|
||||||
std::swap(LHS, RHS); // FALLTHROUGH
|
std::swap(LHS, RHS);
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETULT:
|
case ISD::SETULT:
|
||||||
// Turn lhs u< rhs with lhs constant into rhs u>= lhs+1, this allows us to
|
// Turn lhs u< rhs with lhs constant into rhs u>= lhs+1, this allows us to
|
||||||
// fold constant into instruction.
|
// fold constant into instruction.
|
||||||
@ -833,7 +835,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
|
|||||||
TCC = MSP430CC::COND_LO; // aka COND_NC
|
TCC = MSP430CC::COND_LO; // aka COND_NC
|
||||||
break;
|
break;
|
||||||
case ISD::SETLE:
|
case ISD::SETLE:
|
||||||
std::swap(LHS, RHS); // FALLTHROUGH
|
std::swap(LHS, RHS);
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETGE:
|
case ISD::SETGE:
|
||||||
// Turn lhs >= rhs with lhs constant into rhs < lhs+1, this allows us to
|
// Turn lhs >= rhs with lhs constant into rhs < lhs+1, this allows us to
|
||||||
// fold constant into instruction.
|
// fold constant into instruction.
|
||||||
@ -846,7 +849,8 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, SDValue &TargetCC,
|
|||||||
TCC = MSP430CC::COND_GE;
|
TCC = MSP430CC::COND_GE;
|
||||||
break;
|
break;
|
||||||
case ISD::SETGT:
|
case ISD::SETGT:
|
||||||
std::swap(LHS, RHS); // FALLTHROUGH
|
std::swap(LHS, RHS);
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case ISD::SETLT:
|
case ISD::SETLT:
|
||||||
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
|
// Turn lhs < rhs with lhs constant into rhs >= lhs+1, this allows us to
|
||||||
// fold constant into instruction.
|
// fold constant into instruction.
|
||||||
|
@ -1662,7 +1662,7 @@ static DecodeStatus DecodeMemMMImm12(MCInst &Inst,
|
|||||||
break;
|
break;
|
||||||
case Mips::SC_MM:
|
case Mips::SC_MM:
|
||||||
Inst.addOperand(MCOperand::createReg(Reg));
|
Inst.addOperand(MCOperand::createReg(Reg));
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
default:
|
default:
|
||||||
Inst.addOperand(MCOperand::createReg(Reg));
|
Inst.addOperand(MCOperand::createReg(Reg));
|
||||||
if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM ||
|
if (Inst.getOpcode() == Mips::LWP_MM || Inst.getOpcode() == Mips::SWP_MM ||
|
||||||
|
@ -531,7 +531,7 @@ bool MipsELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
|
|||||||
case ELF::R_MIPS_GPREL32:
|
case ELF::R_MIPS_GPREL32:
|
||||||
if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
|
if (cast<MCSymbolELF>(Sym).getOther() & ELF::STO_MIPS_MICROMIPS)
|
||||||
return true;
|
return true;
|
||||||
// fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case ELF::R_MIPS_26:
|
case ELF::R_MIPS_26:
|
||||||
case ELF::R_MIPS_64:
|
case ELF::R_MIPS_64:
|
||||||
case ELF::R_MIPS_GPREL16:
|
case ELF::R_MIPS_GPREL16:
|
||||||
|
@ -2765,19 +2765,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
|
|||||||
break;
|
break;
|
||||||
case CCValAssign::SExtUpper:
|
case CCValAssign::SExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::SExt:
|
case CCValAssign::SExt:
|
||||||
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
|
Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
|
||||||
break;
|
break;
|
||||||
case CCValAssign::ZExtUpper:
|
case CCValAssign::ZExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::ZExt:
|
case CCValAssign::ZExt:
|
||||||
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
|
Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
|
||||||
break;
|
break;
|
||||||
case CCValAssign::AExtUpper:
|
case CCValAssign::AExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::AExt:
|
case CCValAssign::AExt:
|
||||||
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
|
Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
|
||||||
break;
|
break;
|
||||||
@ -3235,19 +3235,19 @@ MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
|
|||||||
break;
|
break;
|
||||||
case CCValAssign::AExtUpper:
|
case CCValAssign::AExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::AExt:
|
case CCValAssign::AExt:
|
||||||
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
|
Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
|
||||||
break;
|
break;
|
||||||
case CCValAssign::ZExtUpper:
|
case CCValAssign::ZExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::ZExt:
|
case CCValAssign::ZExt:
|
||||||
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
|
Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
|
||||||
break;
|
break;
|
||||||
case CCValAssign::SExtUpper:
|
case CCValAssign::SExtUpper:
|
||||||
UseUpperBits = true;
|
UseUpperBits = true;
|
||||||
// Fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
case CCValAssign::SExt:
|
case CCValAssign::SExt:
|
||||||
Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
|
Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
|
||||||
break;
|
break;
|
||||||
|
@ -1571,7 +1571,8 @@ bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Fall-through to process non-register-name identifiers as expression.
|
// Fall-through to process non-register-name identifiers as expression.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
// All other expressions
|
// All other expressions
|
||||||
case AsmToken::LParen:
|
case AsmToken::LParen:
|
||||||
case AsmToken::Plus:
|
case AsmToken::Plus:
|
||||||
@ -1644,7 +1645,7 @@ bool PPCAsmParser::ParseOperand(OperandVector &Operands) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Fall-through..
|
LLVM_FALLTHROUGH;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return Error(S, "invalid memory operand");
|
return Error(S, "invalid memory operand");
|
||||||
|
@ -3575,7 +3575,8 @@ void PPCDAGToDAGISel::PeepholeCROps() {
|
|||||||
Op.getOperand(0) == Op.getOperand(1))
|
Op.getOperand(0) == Op.getOperand(1))
|
||||||
Op2Not = true;
|
Op2Not = true;
|
||||||
}
|
}
|
||||||
} // fallthrough
|
LLVM_FALLTHROUGH;
|
||||||
|
}
|
||||||
case PPC::BC:
|
case PPC::BC:
|
||||||
case PPC::BCn:
|
case PPC::BCn:
|
||||||
case PPC::SELECT_I4:
|
case PPC::SELECT_I4:
|
||||||
|
@ -3748,7 +3748,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
|
|||||||
ArgOffset += PtrByteSize;
|
ArgOffset += PtrByteSize;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
||||||
case MVT::i64: // PPC64
|
case MVT::i64: // PPC64
|
||||||
if (GPR_idx != Num_GPR_Regs) {
|
if (GPR_idx != Num_GPR_Regs) {
|
||||||
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
|
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
|
||||||
|
@ -131,12 +131,12 @@ int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
|
|||||||
return TTI::TCC_Free;
|
return TTI::TCC_Free;
|
||||||
case Instruction::And:
|
case Instruction::And:
|
||||||
RunFree = true; // (for the rotate-and-mask instructions)
|
RunFree = true; // (for the rotate-and-mask instructions)
|
||||||
// Fallthrough...
|
LLVM_FALLTHROUGH;
|
||||||
case Instruction::Add:
|
case Instruction::Add:
|
||||||
case Instruction::Or:
|
case Instruction::Or:
|
||||||
case Instruction::Xor:
|
case Instruction::Xor:
|
||||||
ShiftedFree = true;
|
ShiftedFree = true;
|
||||||
// Fallthrough...
|
LLVM_FALLTHROUGH;
|
||||||
case Instruction::Sub:
|
case Instruction::Sub:
|
||||||
case Instruction::Mul:
|
case Instruction::Mul:
|
||||||
case Instruction::Shl:
|
case Instruction::Shl:
|
||||||
@ -147,7 +147,8 @@ int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
|
|||||||
case Instruction::ICmp:
|
case Instruction::ICmp:
|
||||||
UnsignedFree = true;
|
UnsignedFree = true;
|
||||||
ImmIdx = 1;
|
ImmIdx = 1;
|
||||||
// Fallthrough... (zero comparisons can use record-form instructions)
|
// Zero comparisons can use record-form instructions.
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case Instruction::Select:
|
case Instruction::Select:
|
||||||
ZeroFree = true;
|
ZeroFree = true;
|
||||||
break;
|
break;
|
||||||
|
@ -182,18 +182,18 @@ getX86ConditionCode(CmpInst::Predicate Predicate) {
|
|||||||
default: break;
|
default: break;
|
||||||
// Floating-point Predicates
|
// Floating-point Predicates
|
||||||
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
|
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
|
||||||
case CmpInst::FCMP_OLT: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
|
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
|
||||||
case CmpInst::FCMP_OLE: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
|
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
|
||||||
case CmpInst::FCMP_UGT: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
|
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
|
||||||
case CmpInst::FCMP_UGE: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
|
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
|
||||||
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
|
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
|
||||||
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
|
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
|
||||||
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
|
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
|
||||||
case CmpInst::FCMP_OEQ: // fall-through
|
case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
|
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
|
||||||
|
|
||||||
// Integer Predicates
|
// Integer Predicates
|
||||||
@ -229,15 +229,15 @@ getX86SSEConditionCode(CmpInst::Predicate Predicate) {
|
|||||||
switch (Predicate) {
|
switch (Predicate) {
|
||||||
default: llvm_unreachable("Unexpected predicate");
|
default: llvm_unreachable("Unexpected predicate");
|
||||||
case CmpInst::FCMP_OEQ: CC = 0; break;
|
case CmpInst::FCMP_OEQ: CC = 0; break;
|
||||||
case CmpInst::FCMP_OGT: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_OGT: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_OLT: CC = 1; break;
|
case CmpInst::FCMP_OLT: CC = 1; break;
|
||||||
case CmpInst::FCMP_OGE: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_OGE: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_OLE: CC = 2; break;
|
case CmpInst::FCMP_OLE: CC = 2; break;
|
||||||
case CmpInst::FCMP_UNO: CC = 3; break;
|
case CmpInst::FCMP_UNO: CC = 3; break;
|
||||||
case CmpInst::FCMP_UNE: CC = 4; break;
|
case CmpInst::FCMP_UNE: CC = 4; break;
|
||||||
case CmpInst::FCMP_ULE: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_ULE: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_UGE: CC = 5; break;
|
case CmpInst::FCMP_UGE: CC = 5; break;
|
||||||
case CmpInst::FCMP_ULT: NeedSwap = true; // fall-through
|
case CmpInst::FCMP_ULT: NeedSwap = true; LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_UGT: CC = 6; break;
|
case CmpInst::FCMP_UGT: CC = 6; break;
|
||||||
case CmpInst::FCMP_ORD: CC = 7; break;
|
case CmpInst::FCMP_ORD: CC = 7; break;
|
||||||
case CmpInst::FCMP_UEQ:
|
case CmpInst::FCMP_UEQ:
|
||||||
@ -518,8 +518,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
|
|||||||
TII.get(X86::AND8ri), AndResult)
|
TII.get(X86::AND8ri), AndResult)
|
||||||
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
|
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
|
||||||
ValReg = AndResult;
|
ValReg = AndResult;
|
||||||
|
LLVM_FALLTHROUGH; // handle i1 as i8.
|
||||||
}
|
}
|
||||||
// FALLTHROUGH, handling i1 as i8.
|
|
||||||
case MVT::i8: Opc = X86::MOV8mr; break;
|
case MVT::i8: Opc = X86::MOV8mr; break;
|
||||||
case MVT::i16: Opc = X86::MOV16mr; break;
|
case MVT::i16: Opc = X86::MOV16mr; break;
|
||||||
case MVT::i32:
|
case MVT::i32:
|
||||||
@ -659,7 +659,9 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
|||||||
bool Signed = true;
|
bool Signed = true;
|
||||||
switch (VT.getSimpleVT().SimpleTy) {
|
switch (VT.getSimpleVT().SimpleTy) {
|
||||||
default: break;
|
default: break;
|
||||||
case MVT::i1: Signed = false; // FALLTHROUGH to handle as i8.
|
case MVT::i1:
|
||||||
|
Signed = false;
|
||||||
|
LLVM_FALLTHROUGH; // Handle as i8.
|
||||||
case MVT::i8: Opc = X86::MOV8mi; break;
|
case MVT::i8: Opc = X86::MOV8mi; break;
|
||||||
case MVT::i16: Opc = X86::MOV16mi; break;
|
case MVT::i16: Opc = X86::MOV16mi; break;
|
||||||
case MVT::i32: Opc = X86::MOV32mi; break;
|
case MVT::i32: Opc = X86::MOV32mi; break;
|
||||||
@ -1601,7 +1603,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
|||||||
switch (Predicate) {
|
switch (Predicate) {
|
||||||
default: break;
|
default: break;
|
||||||
case CmpInst::FCMP_OEQ:
|
case CmpInst::FCMP_OEQ:
|
||||||
std::swap(TrueMBB, FalseMBB); // fall-through
|
std::swap(TrueMBB, FalseMBB);
|
||||||
|
LLVM_FALLTHROUGH;
|
||||||
case CmpInst::FCMP_UNE:
|
case CmpInst::FCMP_UNE:
|
||||||
NeedExtraBranch = true;
|
NeedExtraBranch = true;
|
||||||
Predicate = CmpInst::FCMP_ONE;
|
Predicate = CmpInst::FCMP_ONE;
|
||||||
|
@ -2700,7 +2700,7 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
|
|||||||
case InlineAsm::Constraint_i:
|
case InlineAsm::Constraint_i:
|
||||||
// FIXME: It seems strange that 'i' is needed here since it's supposed to
|
// FIXME: It seems strange that 'i' is needed here since it's supposed to
|
||||||
// be an immediate and not a memory constraint.
|
// be an immediate and not a memory constraint.
|
||||||
// Fallthrough.
|
LLVM_FALLTHROUGH;
|
||||||
case InlineAsm::Constraint_o: // offsetable ??
|
case InlineAsm::Constraint_o: // offsetable ??
|
||||||
case InlineAsm::Constraint_v: // not offsetable ??
|
case InlineAsm::Constraint_v: // not offsetable ??
|
||||||
case InlineAsm::Constraint_m: // memory
|
case InlineAsm::Constraint_m: // memory
|
||||||
|
@ -7567,7 +7567,7 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
|
|||||||
case MVT::v4i64:
|
case MVT::v4i64:
|
||||||
case MVT::v8i32:
|
case MVT::v8i32:
|
||||||
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
|
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
|
||||||
// FALLTHROUGH
|
LLVM_FALLTHROUGH;
|
 case MVT::v2i64:
 case MVT::v4i32:
 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
@ -7583,7 +7583,7 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
 VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
 DAG.getConstant(BlendMask, DL, MVT::i8)));
 }
-// FALLTHROUGH
+LLVM_FALLTHROUGH;
 case MVT::v8i16: {
 // For integer shuffles we need to expand the mask and cast the inputs to
 // v8i16s prior to blending.
@ -7609,8 +7609,8 @@ static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
 DAG.getConstant(BlendMask, DL, MVT::i8));
 }
+LLVM_FALLTHROUGH;
 }
-// FALLTHROUGH
 case MVT::v16i8:
 case MVT::v32i8: {
 assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
@ -15383,19 +15383,19 @@ static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
 case ISD::SETOEQ:
 case ISD::SETEQ: SSECC = 0; break;
 case ISD::SETOGT:
-case ISD::SETGT: Swap = true; // Fallthrough
+case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
 case ISD::SETLT:
 case ISD::SETOLT: SSECC = 1; break;
 case ISD::SETOGE:
-case ISD::SETGE: Swap = true; // Fallthrough
+case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
 case ISD::SETLE:
 case ISD::SETOLE: SSECC = 2; break;
 case ISD::SETUO: SSECC = 3; break;
 case ISD::SETUNE:
 case ISD::SETNE: SSECC = 4; break;
-case ISD::SETULE: Swap = true; // Fallthrough
+case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
 case ISD::SETUGE: SSECC = 5; break;
-case ISD::SETULT: Swap = true; // Fallthrough
+case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
 case ISD::SETUGT: SSECC = 6; break;
 case ISD::SETO: SSECC = 7; break;
 case ISD::SETUEQ:
@ -15501,12 +15501,12 @@ static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
 case ISD::SETNE: SSECC = 4; break;
 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
-case ISD::SETLT: Swap = true; //fall-through
+case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
-case ISD::SETULE: Unsigned = true; //fall-through
+case ISD::SETULE: Unsigned = true; LLVM_FALLTHROUGH;
 case ISD::SETLE: SSECC = 2; break;
 }

@ -18267,7 +18267,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
 case Intrinsic::x86_avx_vtestz_pd:
 case Intrinsic::x86_avx_vtestz_ps_256:
 case Intrinsic::x86_avx_vtestz_pd_256:
-IsTestPacked = true; // Fallthrough
+IsTestPacked = true;
+LLVM_FALLTHROUGH;
 case Intrinsic::x86_sse41_ptestz:
 case Intrinsic::x86_avx_ptestz_256:
 // ZF = 1
@ -18277,7 +18278,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
 case Intrinsic::x86_avx_vtestc_pd:
 case Intrinsic::x86_avx_vtestc_ps_256:
 case Intrinsic::x86_avx_vtestc_pd_256:
-IsTestPacked = true; // Fallthrough
+IsTestPacked = true;
+LLVM_FALLTHROUGH;
 case Intrinsic::x86_sse41_ptestc:
 case Intrinsic::x86_avx_ptestc_256:
 // CF = 1
@ -18287,7 +18289,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget
 case Intrinsic::x86_avx_vtestnzc_pd:
 case Intrinsic::x86_avx_vtestnzc_ps_256:
 case Intrinsic::x86_avx_vtestnzc_pd_256:
-IsTestPacked = true; // Fallthrough
+IsTestPacked = true;
+LLVM_FALLTHROUGH;
 case Intrinsic::x86_sse41_ptestnzc:
 case Intrinsic::x86_avx_ptestnzc_256:
 // ZF and CF = 0
@ -24759,7 +24762,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
 // These nodes' second result is a boolean.
 if (Op.getResNo() == 0)
 break;
-// Fallthrough
+LLVM_FALLTHROUGH;
 case X86ISD::SETCC:
 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
 break;
@ -25946,7 +25949,7 @@ combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,

 Chain.push_back(V);

-// Fallthrough!
+LLVM_FALLTHROUGH;
 case ISD::BITCAST:
 V = V.getOperand(0);
 continue;
@ -27705,7 +27708,7 @@ static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
 case ISD::AND:
 case X86ISD::AND:
 isAnd = true;
-// fallthru
+LLVM_FALLTHROUGH;
 case ISD::OR:
 case X86ISD::OR:
 SetCC0 = Cond->getOperand(0);
@ -31675,7 +31678,7 @@ bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
 case ISD::OR:
 case ISD::XOR:
 Commute = true;
-// fallthrough
+LLVM_FALLTHROUGH;
 case ISD::SUB: {
 SDValue N0 = Op.getOperand(0);
 SDValue N1 = Op.getOperand(1);
@ -151,13 +151,14 @@ X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
 // If VLX isn't support we shouldn't inflate to these classes.
 if (!Subtarget.hasVLX())
 break;
-// Fallthrough. The VLX check above passed, AVX512 check below will pass.
+// The VLX check above passed, AVX512 check below will pass.
+LLVM_FALLTHROUGH;
 case X86::VR128XRegClassID:
 case X86::VR256XRegClassID:
 // If AVX-512 isn't support we shouldn't inflate to these classes.
 if (!Subtarget.hasAVX512())
 break;
-// Fallthrough.
+LLVM_FALLTHROUGH;
 case X86::GR8RegClassID:
 case X86::GR16RegClassID:
 case X86::GR32RegClassID:
@ -1413,7 +1413,7 @@ int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
 // immediates here as the normal path expects bit 31 to be sign extended.
 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
 return TTI::TCC_Free;
-// Fallthrough
+LLVM_FALLTHROUGH;
 case Instruction::Add:
 case Instruction::Sub:
 case Instruction::Mul:
@ -2289,7 +2289,7 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &ICI) {
 case Instruction::UDiv:
 if (Instruction *I = foldICmpUDivConstant(ICI, LHSI, RHSV))
 return I;
-// fall-through
+LLVM_FALLTHROUGH;
 case Instruction::SDiv:
 if (Instruction *I = foldICmpDivConstant(ICI, LHSI, RHSV))
 return I;
@ -4165,7 +4165,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
 case Instruction::LShr:
 if (I.isSigned())
 break;
-// fall-through
+LLVM_FALLTHROUGH;
 case Instruction::SDiv:
 case Instruction::AShr:
 if (!BO0->isExact() || !BO1->isExact())
@ -511,7 +511,7 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
 switch (ord) {
 case AtomicOrdering::NotAtomic:
 llvm_unreachable("unexpected atomic ordering!");
-case AtomicOrdering::Unordered: // Fall-through.
+case AtomicOrdering::Unordered: LLVM_FALLTHROUGH;
 case AtomicOrdering::Monotonic: v = 0; break;
 // Not specified yet:
 // case AtomicOrdering::Consume: v = 1; break;
@ -423,7 +423,7 @@ bool ObjCARCContract::tryToPeepholeInstruction(
 if (!optimizeRetainCall(F, Inst))
 return false;
 // If we succeed in our optimization, fall through.
-// FALLTHROUGH
+LLVM_FALLTHROUGH;
 case ARCInstKind::RetainRV:
 case ARCInstKind::ClaimRV: {
 // If we're compiling for a target which needs a special inline-asm
@ -275,7 +275,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,

 case ICmpInst::ICMP_SLE:
 std::swap(LHS, RHS);
-// fallthrough
+LLVM_FALLTHROUGH;
 case ICmpInst::ICMP_SGE:
 if (match(RHS, m_ConstantInt<0>())) {
 Index = LHS;
@ -285,7 +285,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,

 case ICmpInst::ICMP_SLT:
 std::swap(LHS, RHS);
-// fallthrough
+LLVM_FALLTHROUGH;
 case ICmpInst::ICMP_SGT:
 if (match(RHS, m_ConstantInt<-1>())) {
 Index = LHS;
@ -301,7 +301,7 @@ InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,

 case ICmpInst::ICMP_ULT:
 std::swap(LHS, RHS);
-// fallthrough
+LLVM_FALLTHROUGH;
 case ICmpInst::ICMP_UGT:
 if (IsNonNegativeAndNotLoopVarying(LHS)) {
 Index = RHS;
@ -501,7 +501,8 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
 // GEPs are cheap if all indices are constant.
 if (!cast<GEPOperator>(I)->hasAllConstantIndices())
 return false;
 // fall-thru to increment case
+LLVM_FALLTHROUGH;
 case Instruction::Add:
 case Instruction::Sub:
 case Instruction::And:
@ -770,7 +770,7 @@ static void copyMetadata(Instruction *DstInst, const Instruction *SrcInst,
 MD.second = NewMD;
 }
 }
-// fallthrough.
+LLVM_FALLTHROUGH;
 case LLVMContext::MD_make_implicit:
 case LLVMContext::MD_dbg:
 DstInst->setMetadata(MD.first, MD.second);
@ -1565,7 +1565,7 @@ PointerIntPair<DeclContext *, 1> DeclContextTree::getChildDeclContext(
 !DIE->getAttributeValueAsUnsignedConstant(&U.getOrigUnit(),
 dwarf::DW_AT_external, 0))
 return PointerIntPair<DeclContext *, 1>(nullptr);
-// Fallthrough
+LLVM_FALLTHROUGH;
 case dwarf::DW_TAG_member:
 case dwarf::DW_TAG_namespace:
 case dwarf::DW_TAG_structure_type:
@ -1656,7 +1656,7 @@ void ELFDumper<ELFT>::printValue(uint64_t Type, uint64_t Value) {
 OS << "RELA";
 break;
 }
-// Fallthrough.
+LLVM_FALLTHROUGH;
 case DT_PLTGOT:
 case DT_HASH:
 case DT_STRTAB:
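
The hunks above are all instances of one mechanical pattern: a stray fallthrough comment becomes the LLVM_FALLTHROUGH macro so the intent is visible to the compiler. A minimal sketch of that pattern, for illustration only (not code from this patch; it assumes the macro is taken from llvm/Support/Compiler.h, where it is expected to expand to a compiler-specific fallthrough attribute when one is available and to nothing otherwise):

#include "llvm/Support/Compiler.h"

// Toy switch (hypothetical names) showing the annotated-fallthrough pattern
// used throughout the diff: the first case deliberately shares the handling
// of the second one.
static int classify(int Kind, bool &Swap) {
  switch (Kind) {
  case 0:
    Swap = true;
    LLVM_FALLTHROUGH; // previously a bare "// FALLTHROUGH" comment
  case 1:
    return Swap ? 2 : 1;
  default:
    return 0;
  }
}

The point of the macro over a comment is that warnings such as -Wimplicit-fallthrough can then distinguish intentional fallthroughs from accidental ones, while compilers without attribute support see an empty expansion.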