
[X86] Fix some clang-tidy bugprone-argument-comment issues

Fangrui Song 2020-10-08 15:22:32 -07:00
parent 074f58db6d
commit c199a61ffa
3 changed files with 33 additions and 30 deletions
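For context: clang-tidy's bugprone-argument-comment check warns when a /*Name=*/ comment on a call argument does not match the corresponding parameter name in the callee's declaration, which is exactly the mismatch this commit cleans up (e.g. /*Kill=*/ against parameters named Op0IsKill). Below is a minimal illustrative sketch of the pattern; the function and names are hypothetical, not code from this patch.

// Hypothetical callee: the second parameter is named Op0IsKill.
static unsigned emitWidget(unsigned Op0, bool Op0IsKill) {
  return Op0IsKill ? Op0 : Op0 + 1;
}

unsigned caller(unsigned Reg) {
  // Flagged by bugprone-argument-comment: the comment says "Kill", but the
  // parameter in the declaration is named "Op0IsKill".
  unsigned A = emitWidget(Reg, /*Kill=*/true);

  // After the fix: the argument comment spells the parameter name exactly.
  unsigned B = emitWidget(Reg, /*Op0IsKill=*/true);
  return A + B;
}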

llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp

@@ -2146,7 +2146,7 @@ unsigned X86AsmParser::ParseIntelInlineAsmOperator(unsigned OpKind) {
   SMLoc Start = Tok.getLoc(), End;
   StringRef Identifier = Tok.getString();
   if (ParseIntelInlineAsmIdentifier(Val, Identifier, Info,
-                                    /*Unevaluated=*/true, End))
+                                    /*IsUnevaluatedOperand=*/true, End))
     return 0;
 
   if (!Info.isKind(InlineAsmIdentifierInfo::IK_Var)) {

llvm/lib/Target/X86/X86FastISel.cpp

@@ -1231,13 +1231,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
       if (SrcVT == MVT::i1) {
         if (Outs[0].Flags.isSExt())
           return false;
-        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
+        // TODO
+        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*Op0IsKill=*/false);
         SrcVT = MVT::i8;
       }
       unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
                                              ISD::SIGN_EXTEND;
-      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
-                          SrcReg, /*TODO: Kill=*/false);
+      // TODO
+      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg,
+                          /*Op0IsKill=*/false);
     }
 
     // Make the copy.
@@ -1431,8 +1433,8 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     ResultReg = createResultReg(&X86::GR32RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
             ResultReg);
-    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
-                                           X86::sub_8bit);
+    ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg,
+                                           /*Op0IsKill=*/true, X86::sub_8bit);
     if (!ResultReg)
       return false;
     break;
@@ -1555,11 +1557,11 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
             Result32).addReg(ResultReg);
 
-    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true,
-                                           X86::sub_16bit);
+    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
+                                           /*Op0IsKill=*/true, X86::sub_16bit);
   } else if (DstVT != MVT::i8) {
     ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
-                           ResultReg, /*Kill=*/true);
+                           ResultReg, /*Op0IsKill=*/true);
     if (ResultReg == 0)
       return false;
   }
@@ -1601,11 +1603,11 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
             Result32).addReg(ResultReg);
 
-    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, /*Kill=*/true,
-                                           X86::sub_16bit);
+    ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32,
+                                           /*Op0IsKill=*/true, X86::sub_16bit);
   } else if (DstVT != MVT::i8) {
     ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
-                           ResultReg, /*Kill=*/true);
+                           ResultReg, /*Op0IsKill=*/true);
     if (ResultReg == 0)
       return false;
   }
@@ -1757,7 +1759,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), OpReg)
         .addReg(KOpReg);
-    OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Kill=*/true,
+    OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, /*Op0IsKill=*/true,
                                        X86::sub_8bit);
   }
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
@@ -1989,7 +1991,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
 
       // Now reference the 8-bit subreg of the result.
       ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
-                                             /*Kill=*/true, X86::sub_8bit);
+                                             /*Op0IsKill=*/true, X86::sub_8bit);
     }
     // Copy the result out of the physreg if we haven't already.
     if (!ResultReg) {
@@ -2103,7 +2105,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(TargetOpcode::COPY), CondReg)
           .addReg(KCondReg, getKillRegState(CondIsKill));
-      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true,
+      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
                                            X86::sub_8bit);
     }
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
@@ -2257,12 +2259,12 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     const TargetRegisterClass *VR128 = &X86::VR128RegClass;
     Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
                                        CmpRHSReg, CmpRHSIsKill, CC);
-    Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, /*IsKill=*/false,
-                                      LHSReg, LHSIsKill);
-    Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, /*IsKill=*/true,
-                                       RHSReg, RHSIsKill);
-    Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*IsKill=*/true,
-                                     AndReg, /*IsKill=*/true);
+    Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg,
+                                      /*Op0IsKill=*/false, LHSReg, LHSIsKill);
+    Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg,
+                                       /*Op0IsKill=*/true, RHSReg, RHSIsKill);
+    Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*Op0IsKill=*/true,
+                                     AndReg, /*Op1IsKill=*/true);
     ResultReg = createResultReg(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
@@ -2321,7 +2323,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(TargetOpcode::COPY), CondReg)
           .addReg(KCondReg, getKillRegState(CondIsKill));
-      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Kill=*/true,
+      CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, /*Op0IsKill=*/true,
                                            X86::sub_8bit);
     }
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
@@ -2578,7 +2580,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
 
     unsigned Reg;
     bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
-    RV &= X86FastEmitStore(VT, Reg, /*Kill=*/true, DestAM);
+    RV &= X86FastEmitStore(VT, Reg, /*ValIsKill=*/true, DestAM);
     assert(RV && "Failed to emit load or store??");
 
     unsigned Size = VT.getSizeInBits()/8;
@@ -2642,15 +2644,15 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
     // Explicitly zero-extend the input to 32-bit.
     InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg,
-                          /*Kill=*/false);
+                          /*Op0IsKill=*/false);
 
     // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
     InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
-                          InputReg, /*Kill=*/true);
+                          InputReg, /*Op0IsKill=*/true);
 
     unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
                                        : X86::VCVTPH2PSrr;
-    InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Kill=*/true);
+    InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Op0IsKill=*/true);
 
     // The result value is in the lower 32-bits of ResultReg.
     // Emit an explicit copy from register class VR128 to register class FR32.
@@ -3692,10 +3694,10 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
   default: llvm_unreachable("Unexpected value type");
   case MVT::i1:
   case MVT::i8:
-    return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+    return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Op0IsKill=*/true,
                                       X86::sub_8bit);
   case MVT::i16:
-    return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+    return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Op0IsKill=*/true,
                                       X86::sub_16bit);
   case MVT::i32:
     return SrcReg;

llvm/lib/Target/X86/X86ISelLowering.cpp

@@ -3754,7 +3754,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
       // same, so the size of funclets' (mostly empty) frames is dictated by
       // how far this slot is from the bottom (since they allocate just enough
       // space to accommodate holding this slot at the correct offset).
-      int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSS=*/false);
+      int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);
       EHInfo->PSPSymFrameIdx = PSPSymFI;
     }
   }
@@ -24315,7 +24315,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
   SDValue VAARG = DAG.getMemIntrinsicNode(
       X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
-      /*Align=*/None, MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
+      /*Alignment=*/None,
+      MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
   Chain = VAARG.getValue(1);
 
   // Load the next argument and return it
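A related usage note, separate from this commit: when an argument comment is intentionally kept different from the parameter name, the diagnostic can be suppressed for a single line with clang-tidy's standard NOLINT marker instead of renaming the comment. A minimal hypothetical sketch:

// Hypothetical callee: the second parameter is named IsVolatile.
static int readValue(int Addr, bool IsVolatile) {
  return IsVolatile ? Addr : 0;
}

int example(int Addr) {
  // Deliberate mismatch, silenced for this check on this line only.
  return readValue(Addr, /*Volatile=*/true); // NOLINT(bugprone-argument-comment)
}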