diff --git a/lib/CodeGen/SafeStack.cpp b/lib/CodeGen/SafeStack.cpp
index e097843d5e0..94add920f28 100644
--- a/lib/CodeGen/SafeStack.cpp
+++ b/lib/CodeGen/SafeStack.cpp
@@ -425,8 +425,7 @@ void SafeStack::findInsts(Function &F,
   for (Argument &Arg : F.args()) {
     if (!Arg.hasByValAttr())
       continue;
-    uint64_t Size =
-        DL.getTypeStoreSize(Arg.getType()->getPointerElementType());
+    uint64_t Size = DL.getTypeStoreSize(Arg.getParamByValType());
     if (IsSafeStackAlloca(&Arg, Size))
       continue;
 
@@ -527,7 +526,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
   }
 
   for (Argument *Arg : ByValArguments) {
-    Type *Ty = Arg->getType()->getPointerElementType();
+    Type *Ty = Arg->getParamByValType();
     uint64_t Size = DL.getTypeStoreSize(Ty);
     if (Size == 0)
       Size = 1; // Don't create zero-sized stack objects.
@@ -584,7 +583,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
   for (Argument *Arg : ByValArguments) {
     unsigned Offset = SSL.getObjectOffset(Arg);
     MaybeAlign Align(SSL.getObjectAlignment(Arg));
-    Type *Ty = Arg->getType()->getPointerElementType();
+    Type *Ty = Arg->getParamByValType();
 
     uint64_t Size = DL.getTypeStoreSize(Ty);
     if (Size == 0)
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 84e773aafef..78e21aeecef 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -880,7 +880,7 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
 
   // ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...)
   if (auto *CE = dyn_cast<ConstantExpr>(Val)) {
-    if (CE->getOpcode() == Instruction::GetElementPtr) {
+    if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
       SmallVector<Constant *, 8> Ops;
       Ops.reserve(CE->getNumOperands());
       for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
@@ -894,7 +894,7 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
         Ops.push_back(Op);
       }
       return CE->getWithOperands(Ops, ValVTy->getElementType(), false,
-                                 Ops[0]->getType()->getPointerElementType());
+                                 GEP->getSourceElementType());
     } else if (CE->getOpcode() == Instruction::InsertElement) {
       if (const auto *IEIdx = dyn_cast<ConstantInt>(CE->getOperand(2))) {
         if (APSInt::isSameValue(APSInt(IEIdx->getValue()),
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index 09a2702361c..7d6845b287b 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -114,9 +114,9 @@ void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
       Value *Idx = Constant::getIntegerValue(
         Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
       // Insert GEP at the entry to make it dominate all uses
-      PtrI = GetElementPtrInst::Create(
-          Ptr->getType()->getPointerElementType(), Ptr,
-          ArrayRef<Value *>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
+      PtrI = GetElementPtrInst::Create(I.getType(), Ptr,
+                                       ArrayRef<Value *>(Idx), Twine(""),
+                                       F->getEntryBlock().getFirstNonPHI());
     }
     I.replaceUsesOfWith(Ptr, PtrI);
   }
diff --git a/lib/Target/ARM/MVEGatherScatterLowering.cpp b/lib/Target/ARM/MVEGatherScatterLowering.cpp
index a65d7eb3c52..d85bae5c0ed 100644
--- a/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -488,9 +488,9 @@ Value *MVEGatherScatterLowering::tryCreateMaskedGatherOffset(
   if (Load)
     return Load;
 
-  int Scale = computeScale(
-      BasePtr->getType()->getPointerElementType()->getPrimitiveSizeInBits(),
-      OriginalTy->getScalarSizeInBits());
+  int Scale =
+      computeScale(GEP->getSourceElementType()->getPrimitiveSizeInBits(),
+                   OriginalTy->getScalarSizeInBits());
   if (Scale == -1)
     return nullptr;
   Root = Extend;
@@ -630,9 +630,9 @@ Value *MVEGatherScatterLowering::tryCreateMaskedScatterOffset(
       tryCreateIncrementingGatScat(I, BasePtr, Offsets, GEP, Builder);
   if (Store)
     return Store;
-  int Scale = computeScale(
-      BasePtr->getType()->getPointerElementType()->getPrimitiveSizeInBits(),
-      MemoryTy->getScalarSizeInBits());
+  int Scale =
+      computeScale(GEP->getSourceElementType()->getPrimitiveSizeInBits(),
+                   MemoryTy->getScalarSizeInBits());
   if (Scale == -1)
     return nullptr;
 
@@ -1143,9 +1143,8 @@ bool MVEGatherScatterLowering::optimiseAddress(Value *Address, BasicBlock *BB,
     // (always i32 if it is not of vector type) and the base has to be a
     // pointer.
     if (Offsets && Base && Base != GEP) {
-      PointerType *BaseType = cast<PointerType>(Base->getType());
       GetElementPtrInst *NewAddress = GetElementPtrInst::Create(
-          BaseType->getPointerElementType(), Base, Offsets, "gep.merged", GEP);
+          GEP->getSourceElementType(), Base, Offsets, "gep.merged", GEP);
       GEP->replaceAllUsesWith(NewAddress);
       GEP = NewAddress;
       Changed = true;
diff --git a/lib/Target/Hexagon/HexagonVectorCombine.cpp b/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 73798de474e..942d3720956 100644
--- a/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1321,8 +1321,7 @@ auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
     return None;
 
   Builder B(Gep0->getParent());
-  Value *BasePtr = Gep0->getPointerOperand();
-  int Scale = DL.getTypeStoreSize(BasePtr->getType()->getPointerElementType());
+  int Scale = DL.getTypeStoreSize(Gep0->getSourceElementType());
 
   // FIXME: for now only check GEPs with a single index.
   if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
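
Note (not part of the patch): every hunk above follows the same opaque-pointer migration pattern, taking the element type from a place that still records it explicitly (the GEP's source element type or a byval parameter attribute) instead of calling Type::getPointerElementType(). A minimal sketch of that pattern follows; the helper names gepElementType and byValStoreSize are hypothetical and used only for illustration.

// Illustrative sketch only; compiles against the LLVM C++ API.
#include "llvm/IR/Argument.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// A GEP carries the type it indexes over, so the element type no longer
// has to be recovered from the (soon-to-be-opaque) pointer type.
static Type *gepElementType(const GetElementPtrInst *GEP) {
  return GEP->getSourceElementType();
}

// A byval argument carries its pointee type in the parameter attribute,
// which is what SafeStack now queries for the object size.
static uint64_t byValStoreSize(const DataLayout &DL, const Argument &Arg) {
  assert(Arg.hasByValAttr() && "expected a byval argument");
  return DL.getTypeStoreSize(Arg.getParamByValType());
}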