Rename getABITypeSize to getTypePaddedSize, as suggested by Chris.

llvm-svn: 62099

commit: bcdbfb63dc
parent: 27f9c78c56
@@ -70,7 +70,7 @@ external size_in_bits : TargetData.t -> Llvm.lltype -> Int64.t
 external store_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_store_size"

 (** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getABITypeSize. *)
+    See the method llvm::TargetData::getTypePaddedSize. *)
 external abi_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_abi_size"

 (** Computes the ABI alignment of a type in bytes for a target.
@@ -588,7 +588,7 @@ status code.</p>
 <a href="mailto:foldr@codedgers.com">Mikhail Glushenkov</a><br />
 <a href="http://llvm.org">LLVM Compiler Infrastructure</a><br />

-Last modified: $Date: 2008-12-11 11:34:48 -0600 (Thu, 11 Dec 2008) $
+Last modified: $Date$
 </address></div>
 </div>
 </div>
@@ -70,7 +70,7 @@ unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef);
 unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef);

 /** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getABITypeSize. */
+    See the method llvm::TargetData::getTypePaddedSize. */
 unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef);

 /** Computes the ABI alignment of a type in bytes for a target.
@@ -173,21 +173,21 @@ public:
     return 8*getTypeStoreSize(Ty);
   }

-  /// getABITypeSize - Return the offset in bytes between successive objects
+  /// getTypePaddedSize - Return the offset in bytes between successive objects
   /// of the specified type, including alignment padding.  This is the amount
   /// that alloca reserves for this type.  For example, returns 12 or 16 for
   /// x86_fp80, depending on alignment.
-  uint64_t getABITypeSize(const Type* Ty) const {
+  uint64_t getTypePaddedSize(const Type* Ty) const {
     // Round up to the next alignment boundary.
     return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
   }

-  /// getABITypeSizeInBits - Return the offset in bits between successive
+  /// getTypePaddedSizeInBits - Return the offset in bits between successive
   /// objects of the specified type, including alignment padding; always a
   /// multiple of 8.  This is the amount that alloca reserves for this type.
   /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
-  uint64_t getABITypeSizeInBits(const Type* Ty) const {
-    return 8*getABITypeSize(Ty);
+  uint64_t getTypePaddedSizeInBits(const Type* Ty) const {
+    return 8*getTypePaddedSize(Ty);
   }

   /// getABITypeAlignment - Return the minimum ABI-required alignment for the
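
Illustrative aside, not part of the patch: the renamed function computes the store size rounded up to the ABI alignment. A minimal standalone C++ sketch of that arithmetic, using the x86_fp80 numbers from the doc comment above (the alignment values are illustrative assumptions):

#include <cassert>
#include <cstdint>

// Mirrors the RoundUpAlignment call above: round Val up to the next
// multiple of a power-of-two Alignment.
static uint64_t roundUpAlignment(uint64_t Val, uint64_t Alignment) {
  assert((Alignment & (Alignment - 1)) == 0 && "alignment must be a power of two");
  return (Val + Alignment - 1) & ~(Alignment - 1);
}

int main() {
  // x86_fp80 has a 10-byte store size. With 4-byte ABI alignment (x86-32)
  // the padded size is 12; with 16-byte alignment (x86-64) it is 16,
  // matching the "returns 12 or 16" case in the comment above.
  assert(roundUpAlignment(10, 4) == 12);
  assert(roundUpAlignment(10, 16) == 16);
  return 0;
}
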
@@ -195,7 +195,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
   }

   if (AccessTy->isSized())
-    return TD.getABITypeSize(AccessTy) < Size;
+    return TD.getTypePaddedSize(AccessTy) < Size;
   return false;
 }

@@ -76,7 +76,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
     } else {
       const SequentialType *SQT = cast<SequentialType>(*GTI);
-      Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue();
+      Offset += TD.getTypePaddedSize(SQT->getElementType())*CI->getSExtValue();
     }
   }
   return true;

@@ -459,7 +459,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
         const Type *IndexedTy = GTI.getIndexedType();
         if (!IndexedTy->isSized()) return;
         unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
-        uint64_t TypeSize = TD ? TD->getABITypeSize(IndexedTy) : 1;
+        uint64_t TypeSize = TD ? TD->getTypePaddedSize(IndexedTy) : 1;
         LocalMask = APInt::getAllOnesValue(GEPOpiBits);
         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
         ComputeMaskedBits(Index, LocalMask,

@@ -298,7 +298,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
     // Emit inter-object padding for alignment.
     if (J != E) {
       const Type *Ty = Entry.getType();
-      unsigned EntSize = TM.getTargetData()->getABITypeSize(Ty);
+      unsigned EntSize = TM.getTargetData()->getTypePaddedSize(Ty);
       unsigned ValEnd = Entry.getOffset() + EntSize;
       EmitZeros(J->second.first.getOffset()-ValEnd);
     }

@@ -857,12 +857,12 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {

       // We can emit the pointer value into this slot if the slot is an
       // integer slot greater or equal to the size of the pointer.
-      if (TD->getABITypeSize(Ty) >= TD->getABITypeSize(Op->getType()))
+      if (TD->getTypePaddedSize(Ty) >= TD->getTypePaddedSize(Op->getType()))
         return EmitConstantValueOnly(Op);

       O << "((";
       EmitConstantValueOnly(Op);
-      APInt ptrMask = APInt::getAllOnesValue(TD->getABITypeSizeInBits(Ty));
+      APInt ptrMask = APInt::getAllOnesValue(TD->getTypePaddedSizeInBits(Ty));

       SmallString<40> S;
       ptrMask.toStringUnsigned(S);

@@ -958,14 +958,14 @@ void AsmPrinter::EmitGlobalConstantVector(const ConstantVector *CP) {
 void AsmPrinter::EmitGlobalConstantStruct(const ConstantStruct *CVS) {
   // Print the fields in successive locations. Pad to align if needed!
   const TargetData *TD = TM.getTargetData();
-  unsigned Size = TD->getABITypeSize(CVS->getType());
+  unsigned Size = TD->getTypePaddedSize(CVS->getType());
   const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
   uint64_t sizeSoFar = 0;
   for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
     const Constant* field = CVS->getOperand(i);

     // Check if padding is needed and insert one or more 0s.
-    uint64_t fieldSize = TD->getABITypeSize(field->getType());
+    uint64_t fieldSize = TD->getTypePaddedSize(field->getType());
     uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
                         - cvsLayout->getElementOffset(i)) - fieldSize;
     sizeSoFar += fieldSize + padSize;

@@ -1059,7 +1059,7 @@ void AsmPrinter::EmitGlobalConstantFP(const ConstantFP *CFP) {
           << '\t' << TAI->getCommentString()
           << " long double most significant halfword\n";
       }
-      EmitZeros(TD->getABITypeSize(Type::X86_FP80Ty) -
+      EmitZeros(TD->getTypePaddedSize(Type::X86_FP80Ty) -
                 TD->getTypeStoreSize(Type::X86_FP80Ty));
       return;
     } else if (CFP->getType() == Type::PPC_FP128Ty) {

@@ -1139,7 +1139,7 @@ void AsmPrinter::EmitGlobalConstantLargeInt(const ConstantInt *CI) {
 void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
   const TargetData *TD = TM.getTargetData();
   const Type *type = CV->getType();
-  unsigned Size = TD->getABITypeSize(type);
+  unsigned Size = TD->getTypePaddedSize(type);

   if (CV->isNullValue() || isa<UndefValue>(CV)) {
     EmitZeros(Size);

@@ -276,7 +276,7 @@ void ELFWriter::EmitGlobal(GlobalVariable *GV) {

   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
   unsigned Size =
-    TM.getTargetData()->getABITypeSize(GV->getType()->getElementType());
+    TM.getTargetData()->getTypePaddedSize(GV->getType()->getElementType());

   // If this global has a zero initializer, it is part of the .bss or common
   // section.

@@ -276,7 +276,7 @@ void MachOCodeEmitter::emitConstantPool(MachineConstantPool *MCP) {
   // "giant object for PIC" optimization.
   for (unsigned i = 0, e = CP.size(); i != e; ++i) {
     const Type *Ty = CP[i].getType();
-    unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+    unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);

     MachOWriter::MachOSection *Sec = MOW.getConstSection(CP[i].Val.ConstVal);
     OutputBuffer SecDataOut(Sec->SectionData, is64Bit, isLittleEndian);

@@ -350,7 +350,7 @@ MachOWriter::~MachOWriter() {

 void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);

   // Reserve space in the .bss section for this symbol while maintaining the

@@ -395,7 +395,7 @@ void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {

 void MachOWriter::EmitGlobal(GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
   bool NoInit = !GV->hasInitializer();

   // If this global has a zero initializer, it is part of the .bss or common

@@ -820,7 +820,7 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
       continue;
     } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(PC)) {
       unsigned ElementSize =
-        TD->getABITypeSize(CP->getType()->getElementType());
+        TD->getTypePaddedSize(CP->getType()->getElementType());
       for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CP->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(PC)) {

@@ -921,10 +921,10 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
         abort();
       }
     } else if (isa<ConstantAggregateZero>(PC)) {
-      memset((void*)PA, 0, (size_t)TD->getABITypeSize(PC->getType()));
+      memset((void*)PA, 0, (size_t)TD->getTypePaddedSize(PC->getType()));
     } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(PC)) {
       unsigned ElementSize =
-        TD->getABITypeSize(CPA->getType()->getElementType());
+        TD->getTypePaddedSize(CPA->getType()->getElementType());
       for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CPA->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(PC)) {

@@ -468,7 +468,7 @@ namespace llvm {

       const Type *Ty = C->getType();
       if (Ty->isPrimitiveType() || Ty->isInteger()) {
-        unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+        unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
         switch(Size) {
         default: break; // Fall through to __TEXT,__const
         case 4:

@@ -503,7 +503,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(Constant *C,
   unsigned Offset = 0;
   if (!Constants.empty()) {
     Offset = Constants.back().getOffset();
-    Offset += TD->getABITypeSize(Constants.back().getType());
+    Offset += TD->getTypePaddedSize(Constants.back().getType());
     Offset = (Offset+AlignMask)&~AlignMask;
   }

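
Illustrative aside, not part of the patch: the constant-pool layout above places each entry at the previous entry's offset plus its padded size, then rounds up to the entry's alignment with (Offset+AlignMask)&~AlignMask. A tiny self-contained C++ check of that rounding (the numbers are made up for illustration):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Offset = 10;        // hypothetical: previous entry ended at byte 10
  uint64_t AlignMask = 8 - 1;  // align the next entry to 8 bytes
  Offset = (Offset + AlignMask) & ~AlignMask;
  assert(Offset == 16);        // 10 rounds up to the next 8-byte boundary
  return 0;
}
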
@@ -527,7 +527,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
   unsigned Offset = 0;
   if (!Constants.empty()) {
     Offset = Constants.back().getOffset();
-    Offset += TD->getABITypeSize(Constants.back().getType());
+    Offset += TD->getTypePaddedSize(Constants.back().getType());
     Offset = (Offset+AlignMask)&~AlignMask;
   }

@@ -273,7 +273,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs =
-          TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
         if (N == 0)
           // Unhandled operand. Halt "fast" selection and bail.

@@ -282,7 +282,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
       }

       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD.getABITypeSize(Ty);
+      uint64_t ElementSize = TD.getTypePaddedSize(Ty);
       unsigned IdxN = getRegForGEPIndex(Idx);
       if (IdxN == 0)
         // Unhandled operand. Halt "fast" selection and bail.

@@ -3572,8 +3572,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
     SDValue VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList,
-                       DAG.getConstant(TLI.getTargetData()->getABITypeSize(VT.getTypeForMVT()),
-                                       TLI.getPointerTy()));
+                       DAG.getConstant(TLI.getTargetData()->
+                                       getTypePaddedSize(VT.getTypeForMVT()),
+                                       TLI.getPointerTy()));
     // Store the incremented VAList to the legalized pointer
     Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
     // Load the actual argument out of the pointer VAList
@@ -296,7 +296,7 @@ void ScheduleDAGSDNodes::AddOperand(MachineInstr *MI, SDValue Op,
     Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
     if (Align == 0) {
       // Alignment of vector types.  FIXME!
-      Align = TM.getTargetData()->getABITypeSize(Type);
+      Align = TM.getTargetData()->getTypePaddedSize(Type);
       Align = Log2_64(Align);
     }
   }

@@ -125,7 +125,7 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
   // Given an array type, recursively traverse the elements.
   if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getABITypeSize(EltTy);
+    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);

@@ -288,7 +288,7 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
       if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
         unsigned Align =
           std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());

@@ -2603,14 +2603,14 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs =
-          TD->getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                         DAG.getIntPtrConstant(Offs));
         continue;
       }

       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD->getABITypeSize(Ty);
+      uint64_t ElementSize = TD->getTypePaddedSize(Ty);
       SDValue IdxN = getValue(Idx);

       // If the index is smaller or larger than intptr_t, truncate or extend

@@ -2646,7 +2646,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
     return;   // getValue will auto-populate this.

   const Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+  uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
   unsigned Align =
     std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
              I.getAlignment());

@@ -4951,7 +4951,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         const Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
         unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);

@@ -5236,7 +5236,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
     Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

   // Scale the source by the type size.
-  uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
+  uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
   Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                     Src, DAG.getIntPtrConstant(ElementSize));

@@ -5337,7 +5337,7 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
       const PointerType *Ty = cast<PointerType>(I->getType());
       const Type *ElementTy = Ty->getElementType();
       unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-      unsigned FrameSize = getTargetData()->getABITypeSize(ElementTy);
+      unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
       // For ByVal, alignment should be passed from FE.  BE will guess if
       // this info is not there but there are cases it cannot get right.
       if (F.getParamAlignment(j))

@@ -5470,7 +5470,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
       const PointerType *Ty = cast<PointerType>(Args[i].Ty);
       const Type *ElementTy = Ty->getElementType();
       unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-      unsigned FrameSize = getTargetData()->getABITypeSize(ElementTy);
+      unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
       // For ByVal, alignment should come from FE.  BE will guess if this
       // info is not there but there are cases it cannot get right.
       if (Args[i].Alignment)

@@ -114,7 +114,7 @@ bool StackProtector::RequiresStackProtector() const {
       if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
         // If an array has more than SSPBufferSize bytes of allocated space,
        // then we emit stack protectors.
-        if (SSPBufferSize <= TD->getABITypeSize(AT))
+        if (SSPBufferSize <= TD->getTypePaddedSize(AT))
          return true;
     }
   }
@@ -54,7 +54,7 @@ ExecutionEngine::~ExecutionEngine() {

 char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   return new char[GVSize];
 }

@@ -845,16 +845,16 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
     return;
   } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getABITypeSize(CP->getType()->getElementType());
+      getTargetData()->getTypePaddedSize(CP->getType()->getElementType());
     for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
       InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
     return;
   } else if (isa<ConstantAggregateZero>(Init)) {
-    memset(Addr, 0, (size_t)getTargetData()->getABITypeSize(Init->getType()));
+    memset(Addr, 0, (size_t)getTargetData()->getTypePaddedSize(Init->getType()));
     return;
   } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getABITypeSize(CPA->getType()->getElementType());
+      getTargetData()->getTypePaddedSize(CPA->getType()->getElementType());
     for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
       InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
     return;

@@ -1001,7 +1001,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
     InitializeMemory(GV->getInitializer(), GA);

   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   NumInitBytes += (unsigned)GVSize;
   ++NumGlobals;
 }

@@ -750,7 +750,7 @@ void Interpreter::visitAllocationInst(AllocationInst &I) {
   unsigned NumElements =
     getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();

-  unsigned TypeSize = (size_t)TD.getABITypeSize(Ty);
+  unsigned TypeSize = (size_t)TD.getTypePaddedSize(Ty);

   // Avoid malloc-ing zero bytes, use max()...
   unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);

@@ -810,7 +810,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
         assert(BitWidth == 64 && "Invalid index type for getelementptr");
         Idx = (int64_t)IdxGV.IntVal.getZExtValue();
       }
-      Total += TD.getABITypeSize(ST->getElementType())*Idx;
+      Total += TD.getTypePaddedSize(ST->getElementType())*Idx;
     }
   }

@@ -562,7 +562,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
     // emit it into memory.  It goes in the same array as the generated
     // code, jump tables, etc.
     const Type *GlobalType = GV->getType()->getElementType();
-    size_t S = getTargetData()->getABITypeSize(GlobalType);
+    size_t S = getTargetData()->getTypePaddedSize(GlobalType);
     size_t A = getTargetData()->getPreferredAlignment(GV);
     if (GV->isThreadLocal()) {
       MutexGuard locked(lock);

@@ -617,7 +617,7 @@ void *JIT::recompileAndRelinkFunction(Function *F) {
 ///
 char* JIT::getMemoryForGV(const GlobalVariable* GV) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   if (GV->isThreadLocal()) {
     MutexGuard locked(lock);
     return TJI.allocateThreadLocalMemory(GVSize);

@@ -659,7 +659,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
     unsigned Size = CPE.Offset;
     const Type *Ty = CPE.isMachineConstantPoolEntry()
       ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
-    Size += TheJIT->getTargetData()->getABITypeSize(Ty);
+    Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);
     return Size;
   }

@@ -687,7 +687,7 @@ static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {

 unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)TheJIT->getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)TheJIT->getTargetData()->getTypePaddedSize(ElTy);
   size_t GVAlign =
     (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
   DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;

@@ -1080,7 +1080,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
   unsigned Size = CPE.Offset;
   const Type *Ty = CPE.isMachineConstantPoolEntry()
     ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
-  Size += TheJIT->getTargetData()->getABITypeSize(Ty);
+  Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);

   unsigned Align = 1 << MCP->getConstantPoolAlignment();
   ConstantPoolBase = allocateSpace(Size, Align);
@@ -295,7 +295,7 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &Fn,

   const TargetData &TD = *Fn.getTarget().getTargetData();
   for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
-    unsigned Size = TD.getABITypeSize(CPs[i].getType());
+    unsigned Size = TD.getTypePaddedSize(CPs[i].getType());
     // Verify that all constant pool entries are a multiple of 4 bytes.  If not,
     // we would have to pad them out or something so that instructions stay
     // aligned.

@@ -825,7 +825,7 @@ void ARMAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {
   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
   const Type *Type = C->getType();
-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   unsigned Align = TD->getPreferredAlignmentLog(GVar);
   bool isDarwin = Subtarget->isTargetDarwin();

@@ -216,7 +216,7 @@ void AlphaAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {

   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
-  unsigned Size = TD->getABITypeSize(C->getType());
+  unsigned Size = TD->getTypePaddedSize(C->getType());
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   // 0: Switch to section

@@ -490,7 +490,7 @@ CWriter::printSimpleType(raw_ostream &Out, const Type *Ty, bool isSigned,
     const VectorType *VTy = cast<VectorType>(Ty);
     return printSimpleType(Out, VTy->getElementType(), isSigned,
                     " __attribute__((vector_size(" +
-                    utostr(TD->getABITypeSize(VTy)) + " ))) " + NameSoFar);
+                    utostr(TD->getTypePaddedSize(VTy)) + " ))) " + NameSoFar);
   }

   default:

@@ -535,7 +535,7 @@ CWriter::printSimpleType(std::ostream &Out, const Type *Ty, bool isSigned,
     const VectorType *VTy = cast<VectorType>(Ty);
     return printSimpleType(Out, VTy->getElementType(), isSigned,
                     " __attribute__((vector_size(" +
-                    utostr(TD->getABITypeSize(VTy)) + " ))) " + NameSoFar);
+                    utostr(TD->getTypePaddedSize(VTy)) + " ))) " + NameSoFar);
   }

   default:

@@ -529,7 +529,7 @@ void LinuxAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {

   Constant *C = GVar->getInitializer();
   const Type *Type = C->getType();
-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   SwitchToSection(TAI->SectionForGlobal(GVar));

@@ -117,7 +117,7 @@ DarwinTargetAsmInfo::MergeableStringSection(const GlobalVariable *GV) const {
   Constant *C = cast<GlobalVariable>(GV)->getInitializer();
   const Type *Type = cast<ConstantArray>(C)->getType()->getElementType();

-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   if (Size) {
     unsigned Align = TD->getPreferredAlignment(GV);
     if (Align <= 32)

@@ -138,7 +138,7 @@ inline const Section*
 DarwinTargetAsmInfo::MergeableConstSection(const Type *Ty) const {
   const TargetData *TD = TM.getTargetData();

-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);
   if (Size == 4)
     return FourByteConstantSection;
   else if (Size == 8)

@@ -109,7 +109,7 @@ ELFTargetAsmInfo::MergeableConstSection(const Type *Ty) const {
   // FIXME: string here is temporary, until stuff will fully land in.
   // We cannot use {Four,Eight,Sixteen}ByteConstantSection here, since it's
   // currently directly used by asmprinter.
-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);
   if (Size == 4 || Size == 8 || Size == 16) {
     std::string Name = ".rodata.cst" + utostr(Size);

@@ -128,7 +128,7 @@ ELFTargetAsmInfo::MergeableStringSection(const GlobalVariable *GV) const {
   const ConstantArray *CVA = cast<ConstantArray>(C);
   const Type *Ty = CVA->getType()->getElementType();

-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);
   if (Size <= 16) {
     assert(getCStringSection() && "Should have string section prefix");

@@ -266,7 +266,7 @@ void IA64AsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {
   O << "\n\n";
   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
-  unsigned Size = TD->getABITypeSize(C->getType());
+  unsigned Size = TD->getTypePaddedSize(C->getType());
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   printVisibility(name, GVar->getVisibility());
@@ -378,7 +378,7 @@ std::string MSILWriter::getTypePostfix(const Type* Ty, bool Expand,
   case Type::DoubleTyID:
     return "r8";
   case Type::PointerTyID:
-    return "i"+utostr(TD->getABITypeSize(Ty));
+    return "i"+utostr(TD->getTypePaddedSize(Ty));
   default:
     cerr << "TypeID = " << Ty->getTypeID() << '\n';
     assert(0 && "Invalid type in TypeToPostfix()");

@@ -688,14 +688,14 @@ void MSILWriter::printGepInstruction(const Value* V, gep_type_iterator I,
       uint64_t FieldIndex = cast<ConstantInt>(IndexValue)->getZExtValue();
       // Offset is the sum of all previous structure fields.
       for (uint64_t F = 0; F<FieldIndex; ++F)
-        Size += TD->getABITypeSize(StrucTy->getContainedType((unsigned)F));
+        Size += TD->getTypePaddedSize(StrucTy->getContainedType((unsigned)F));
       printPtrLoad(Size);
       printSimpleInstruction("add");
       continue;
     } else if (const SequentialType* SeqTy = dyn_cast<SequentialType>(*I)) {
-      Size = TD->getABITypeSize(SeqTy->getElementType());
+      Size = TD->getTypePaddedSize(SeqTy->getElementType());
     } else {
-      Size = TD->getABITypeSize(*I);
+      Size = TD->getTypePaddedSize(*I);
     }
     // Add offset of current element to stack top.
     if (!isZeroValue(IndexValue)) {

@@ -1020,7 +1020,7 @@ void MSILWriter::printVAArgInstruction(const VAArgInst* Inst) {


 void MSILWriter::printAllocaInstruction(const AllocaInst* Inst) {
-  uint64_t Size = TD->getABITypeSize(Inst->getAllocatedType());
+  uint64_t Size = TD->getTypePaddedSize(Inst->getAllocatedType());
   // Constant optimization.
   if (const ConstantInt* CInt = dyn_cast<ConstantInt>(Inst->getOperand(0))) {
     printPtrLoad(CInt->getZExtValue()*Size);

@@ -1436,7 +1436,8 @@ void MSILWriter::printDeclarations(const TypeSymbolTable& ST) {
     // Print not duplicated type
     if (Printed.insert(Ty).second) {
       Out << ".class value explicit ansi sealed '" << Name << "'";
-      Out << " { .pack " << 1 << " .size " << TD->getABITypeSize(Ty)<< " }\n\n";
+      Out << " { .pack " << 1 << " .size " << TD->getTypePaddedSize(Ty);
+      Out << " }\n\n";
     }
   }
 }

@@ -1465,7 +1466,7 @@ void MSILWriter::printStaticConstant(const Constant* C, uint64_t& Offset) {
   const Type* Ty = C->getType();
   // Print zero initialized constant.
   if (isa<ConstantAggregateZero>(C) || C->isNullValue()) {
-    TySize = TD->getABITypeSize(C->getType());
+    TySize = TD->getTypePaddedSize(C->getType());
     Offset += TySize;
     Out << "int8 (0) [" << TySize << "]";
     return;

@@ -1473,14 +1474,14 @@ void MSILWriter::printStaticConstant(const Constant* C, uint64_t& Offset) {
   // Print constant initializer
   switch (Ty->getTypeID()) {
   case Type::IntegerTyID: {
-    TySize = TD->getABITypeSize(Ty);
+    TySize = TD->getTypePaddedSize(Ty);
     const ConstantInt* Int = cast<ConstantInt>(C);
     Out << getPrimitiveTypeName(Ty,true) << "(" << Int->getSExtValue() << ")";
     break;
   }
   case Type::FloatTyID:
   case Type::DoubleTyID: {
-    TySize = TD->getABITypeSize(Ty);
+    TySize = TD->getTypePaddedSize(Ty);
     const ConstantFP* FP = cast<ConstantFP>(C);
     if (Ty->getTypeID() == Type::FloatTyID)
       Out << "int32 (" <<

@@ -1499,7 +1500,7 @@ void MSILWriter::printStaticConstant(const Constant* C, uint64_t& Offset) {
     }
     break;
   case Type::PointerTyID:
-    TySize = TD->getABITypeSize(C->getType());
+    TySize = TD->getTypePaddedSize(C->getType());
     // Initialize with global variable address
     if (const GlobalVariable *G = dyn_cast<GlobalVariable>(C)) {
       std::string name = getValueName(G);
@@ -480,7 +480,7 @@ printModuleLevelGV(const GlobalVariable* GVar) {
   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
   const Type *CTy = C->getType();
-  unsigned Size = TD->getABITypeSize(CTy);
+  unsigned Size = TD->getTypePaddedSize(CTy);
   const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
   bool printSizeAndType = true;

@@ -212,7 +212,7 @@ bool MipsTargetLowering::IsGlobalInSmallSection(GlobalValue *GV)
     return false;

   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);

   // if this is a internal constant string, there is a special
   // section for it, but not in small data/bss.

@@ -543,7 +543,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
   // hacking it. This feature should come soon so we can uncomment the
   // stuff below.
   //if (!Subtarget->hasABICall() &&
-  //    IsInSmallSection(getTargetData()->getABITypeSize(C->getType()))) {
+  //    IsInSmallSection(getTargetData()->getTypePaddedSize(C->getType()))) {
   //  SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
   //  SDValue GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i32);
   //  ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);

@@ -66,7 +66,7 @@ SectionKindForGlobal(const GlobalValue *GV) const {

   if (isa<GlobalVariable>(GV)) {
     const TargetData *TD = TM.getTargetData();
-    unsigned Size = TD->getABITypeSize(GV->getType()->getElementType());
+    unsigned Size = TD->getTypePaddedSize(GV->getType()->getElementType());
     unsigned Threshold = Subtarget->getSSectionThreshold();

     if (Size > 0 && Size <= Threshold) {

@@ -299,7 +299,7 @@ void PIC16AsmPrinter::EmitUnInitData (Module &M)
       continue;

     const Type *Ty = C->getType();
-    unsigned Size = TD->getABITypeSize(Ty);
+    unsigned Size = TD->getTypePaddedSize(Ty);
     O << name << " " <<"RES"<< " " << Size ;
     O << "\n";
   }

@@ -327,7 +327,7 @@ void PIC16AsmPrinter::emitFunctionData(MachineFunction &MF) {
   O << CurrentFnName << ".retval:\n";
   const Type *RetType = F->getReturnType();
   if (RetType->getTypeID() != Type::VoidTyID) {
-    unsigned RetSize = TD->getABITypeSize(RetType);
+    unsigned RetSize = TD->getTypePaddedSize(RetType);
     if (RetSize > 0)
       O << CurrentFnName << ".retval" << " RES " << RetSize;
   }

@@ -337,7 +337,7 @@ void PIC16AsmPrinter::emitFunctionData(MachineFunction &MF) {
        AI != AE; ++AI) {
     std::string ArgName = Mang->getValueName(AI);
     const Type *ArgTy = AI->getType();
-    unsigned ArgSize = TD->getABITypeSize(ArgTy);
+    unsigned ArgSize = TD->getTypePaddedSize(ArgTy);
     O << CurrentFnName << ".args." << ArgName << " RES " << ArgSize;
   }
   // Emit the function variables.

@@ -357,7 +357,7 @@ void PIC16AsmPrinter::emitFunctionData(MachineFunction &MF) {

     Constant *C = I->getInitializer();
     const Type *Ty = C->getType();
-    unsigned Size = TD->getABITypeSize(Ty);
+    unsigned Size = TD->getTypePaddedSize(Ty);
     // Emit memory reserve directive.
     O << VarName << " RES " << Size << "\n";
   }

@@ -679,7 +679,7 @@ void PPCLinuxAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {

   Constant *C = GVar->getInitializer();
   const Type *Type = C->getType();
-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   SwitchToSection(TAI->SectionForGlobal(GVar));

@@ -904,7 +904,7 @@ void PPCDarwinAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {

   Constant *C = GVar->getInitializer();
   const Type *Type = C->getType();
-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   SwitchToSection(TAI->SectionForGlobal(GVar));

@@ -246,7 +246,7 @@ void SparcAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {
   O << "\n\n";
   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
-  unsigned Size = TD->getABITypeSize(C->getType());
+  unsigned Size = TD->getTypePaddedSize(C->getType());
   unsigned Align = TD->getPreferredAlignment(GVar);

   printVisibility(name, GVar->getVisibility());

@@ -53,7 +53,7 @@ unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
 }

 unsigned long long LLVMABISizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
-  return unwrap(TD)->getABITypeSize(unwrap(Ty));
+  return unwrap(TD)->getTypePaddedSize(unwrap(Ty));
 }

 unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
@@ -58,7 +58,7 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
     StructAlignment = std::max(TyAlign, StructAlignment);

     MemberOffsets[i] = StructSize;
-    StructSize += TD.getABITypeSize(Ty); // Consume space for this data item
+    StructSize += TD.getTypePaddedSize(Ty); // Consume space for this data item
   }

   // Empty structures have alignment of 1 byte.
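
Illustrative aside, not part of the patch: StructLayout accumulates member offsets by padding to each field's alignment, recording the offset, and then consuming the field's padded size. A self-contained C++ sketch with hypothetical (size, alignment) pairs standing in for the TargetData queries:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  struct Field { uint64_t Size; uint64_t Align; };
  // Hypothetical layout of { i8, i32, i16 } on a typical 32-bit target.
  std::vector<Field> Fields = {{1, 1}, {4, 4}, {2, 2}};
  uint64_t StructSize = 0;
  std::vector<uint64_t> MemberOffsets;
  for (const Field &F : Fields) {
    // Pad to this field's alignment boundary.
    StructSize = (StructSize + F.Align - 1) & ~(F.Align - 1);
    MemberOffsets.push_back(StructSize);  // MemberOffsets[i] = StructSize;
    StructSize += F.Size;                 // consume space for this data item
  }
  assert(MemberOffsets[0] == 0 && MemberOffsets[1] == 4 && MemberOffsets[2] == 8);
  return 0;
}
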
@@ -425,7 +425,7 @@ uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
     return getPointerSizeInBits();
   case Type::ArrayTyID: {
     const ArrayType *ATy = cast<ArrayType>(Ty);
-    return getABITypeSizeInBits(ATy->getElementType())*ATy->getNumElements();
+    return getTypePaddedSizeInBits(ATy->getElementType())*ATy->getNumElements();
   }
   case Type::StructTyID:
     // Get the layout annotation... which is lazily created on demand.

@@ -568,7 +568,7 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,

       // Get the array index and the size of each array element.
       int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue();
-      Result += arrayIdx * (int64_t)getABITypeSize(Ty);
+      Result += arrayIdx * (int64_t)getTypePaddedSize(Ty);
     }
   }

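
Illustrative aside, not part of the patch: for an array index, getIndexedOffset's contribution is simply index times the padded element size, which is why arrays of padded types (such as x86_fp80) stride by 12 or 16 bytes rather than 10. A one-line check with assumed numbers:

#include <cassert>
#include <cstdint>

int main() {
  int64_t arrayIdx = 3;
  int64_t PaddedEltSize = 12;  // hypothetical: 10-byte store size, 4-byte alignment
  assert(arrayIdx * PaddedEltSize == 36);  // byte offset of A[3]
  return 0;
}
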
@@ -75,7 +75,7 @@ static X86MachineFunctionInfo calculateFunctionInfo(const Function *F,
       Ty = cast<PointerType>(Ty)->getElementType();

     // Size should be aligned to DWORD boundary
-    Size += ((TD->getABITypeSize(Ty) + 3)/4)*4;
+    Size += ((TD->getTypePaddedSize(Ty) + 3)/4)*4;
   }

   // We're not supporting tooooo huge arguments :)
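
Illustrative aside, not part of the patch: the stdcall/fastcall byte count above rounds each argument's padded size up to a 4-byte (DWORD) boundary with ((Size + 3)/4)*4. Two assumed examples:

#include <cassert>

int main() {
  assert(((1 + 3) / 4) * 4 == 4);    // an i8 argument still occupies a full DWORD
  assert(((10 + 3) / 4) * 4 == 12);  // a 10-byte argument occupies three DWORDs
  return 0;
}
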
@@ -767,7 +767,7 @@ void X86ATTAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {
   std::string name = Mang->getValueName(GVar);
   Constant *C = GVar->getInitializer();
   const Type *Type = C->getType();
-  unsigned Size = TD->getABITypeSize(Type);
+  unsigned Size = TD->getTypePaddedSize(Type);
   unsigned Align = TD->getPreferredAlignmentLog(GVar);

   printVisibility(name, GVar->getVisibility());

@@ -58,7 +58,7 @@ static X86MachineFunctionInfo calculateFunctionInfo(const Function *F,
       Ty = cast<PointerType>(Ty)->getElementType();

     // Size should be aligned to DWORD boundary
-    Size += ((TD->getABITypeSize(Ty) + 3)/4)*4;
+    Size += ((TD->getTypePaddedSize(Ty) + 3)/4)*4;
   }

   // We're not supporting tooooo huge arguments :)

@@ -386,7 +386,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
         unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
         Disp += SL->getElementOffset(Idx);
       } else {
-        uint64_t S = TD.getABITypeSize(GTI.getIndexedType());
+        uint64_t S = TD.getTypePaddedSize(GTI.getIndexedType());
         if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
           // Constant-offset addressing.
           Disp += CI->getSExtValue() * S;

@@ -1469,7 +1469,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
   unsigned Align = TD.getPreferredTypeAlignmentShift(C->getType());
   if (Align == 0) {
     // Alignment of vector types.  FIXME!
-    Align = TD.getABITypeSize(C->getType());
+    Align = TD.getTypePaddedSize(C->getType());
     Align = Log2_64(Align);
   }

@@ -214,7 +214,7 @@ emitGlobal(const GlobalVariable *GV)

     EmitAlignment(Align, GV, 2);

-    unsigned Size = TD->getABITypeSize(C->getType());
+    unsigned Size = TD->getTypePaddedSize(C->getType());
     if (GV->isThreadLocal()) {
       Size *= MaxThreads;
     }

@@ -265,7 +265,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
   }
   SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
   const TargetData *TD = TM.getTargetData();
-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);
   SDValue offset = DAG.getNode(ISD::MUL, MVT::i32, BuildGetId(DAG),
                                DAG.getConstant(Size, MVT::i32));
   return DAG.getNode(ISD::ADD, MVT::i32, base, offset);

@@ -106,7 +106,7 @@ inline const Section*
 XCoreTargetAsmInfo::MergeableConstSection(const Type *Ty) const {
   const TargetData *TD = TM.getTargetData();

-  unsigned Size = TD->getABITypeSize(Ty);
+  unsigned Size = TD->getTypePaddedSize(Ty);
   if (Size == 4 || Size == 8 || Size == 16) {
     std::string Name = ".cp.const" + utostr(Size);

@@ -511,7 +511,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
     return 0; // It's not worth it.
   NewGlobals.reserve(NumElements);

-  uint64_t EltSize = TD.getABITypeSize(STy->getElementType());
+  uint64_t EltSize = TD.getTypePaddedSize(STy->getElementType());
   unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
   for (unsigned i = 0, e = NumElements; i != e; ++i) {
     Constant *In = getAggregateConstantElement(Init,

@@ -1445,7 +1445,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
     // (2048 bytes currently), as we don't want to introduce a 16M global or
     // something.
     if (NElements->getZExtValue()*
-        TD.getABITypeSize(MI->getAllocatedType()) < 2048) {
+        TD.getTypePaddedSize(MI->getAllocatedType()) < 2048) {
       GVI = OptimizeGlobalAddressOfMalloc(GV, MI);
       return true;
     }
@@ -817,7 +817,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
           cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
         ConstantOffset += SL->getElementOffset(Idx);
       } else {
-        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
+        uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
         if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
           ConstantOffset += CI->getSExtValue()*TypeSize;
         } else if (TypeSize) {  // Scales of zero don't do anything.

@@ -305,11 +305,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
         if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
           if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
             pointerSize = C->getZExtValue() *
-                          TD.getABITypeSize(A->getAllocatedType());
+                          TD.getTypePaddedSize(A->getAllocatedType());
         } else {
           const PointerType* PT = cast<PointerType>(
                                                 cast<Argument>(*I)->getType());
-          pointerSize = TD.getABITypeSize(PT->getElementType());
+          pointerSize = TD.getTypePaddedSize(PT->getElementType());
         }

         // See if the call site touches it

@@ -382,10 +382,10 @@ bool DSE::RemoveUndeadPointers(Value* killPointer, uint64_t killPointerSize,
       if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
         if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
           pointerSize = C->getZExtValue() *
-                        TD.getABITypeSize(A->getAllocatedType());
+                        TD.getTypePaddedSize(A->getAllocatedType());
       } else {
         const PointerType* PT = cast<PointerType>(cast<Argument>(*I)->getType());
-        pointerSize = TD.getABITypeSize(PT->getElementType());
+        pointerSize = TD.getTypePaddedSize(PT->getElementType());
       }

       // See if this pointer could alias it

@@ -5142,7 +5142,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
   for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
        ++i, ++GTI) {
     Value *Op = *i;
-    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType()) & PtrSizeMask;
+    uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()) & PtrSizeMask;
     if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
       if (OpC->isZero()) continue;

@@ -5233,7 +5233,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
       if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
         Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
       } else {
-        uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+        uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
         Offset += Size*CI->getSExtValue();
       }
     } else {

@@ -5249,7 +5249,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
   Value *VariableIdx = GEP->getOperand(i);
   // Determine the scale factor of the variable element.  For example, this is
   // 4 if the variable index is into an array of i32.
-  uint64_t VariableScale = TD.getABITypeSize(GTI.getIndexedType());
+  uint64_t VariableScale = TD.getTypePaddedSize(GTI.getIndexedType());

   // Verify that there are no other variable indices.  If so, emit the hard way.
   for (++i, ++GTI; i != e; ++i, ++GTI) {

@@ -5263,7 +5263,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
     if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
       Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
-      uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+      uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
       Offset += Size*CI->getSExtValue();
     }
   }

@@ -7419,8 +7419,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   // same, we open the door to infinite loops of various kinds.
   if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;

-  uint64_t AllocElTySize = TD->getABITypeSize(AllocElTy);
-  uint64_t CastElTySize = TD->getABITypeSize(CastElTy);
+  uint64_t AllocElTySize = TD->getTypePaddedSize(AllocElTy);
+  uint64_t CastElTySize = TD->getTypePaddedSize(CastElTy);
   if (CastElTySize == 0 || AllocElTySize == 0) return 0;

   // See if we can satisfy the modulus by pulling a scale out of the array

@@ -7708,7 +7708,7 @@ static bool FindElementAtOffset(const Type *Ty, int64_t Offset,
   // is something like [0 x {int, int}]
   const Type *IntPtrTy = TD->getIntPtrType();
   int64_t FirstIdx = 0;
-  if (int64_t TySize = TD->getABITypeSize(Ty)) {
+  if (int64_t TySize = TD->getTypePaddedSize(Ty)) {
     FirstIdx = Offset/TySize;
     Offset -= FirstIdx*TySize;

@@ -7740,7 +7740,7 @@ static bool FindElementAtOffset(const Type *Ty, int64_t Offset,
       Offset -= SL->getElementOffset(Elt);
       Ty = STy->getElementType(Elt);
     } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
-      uint64_t EltSize = TD->getABITypeSize(AT->getElementType());
+      uint64_t EltSize = TD->getTypePaddedSize(AT->getElementType());
       assert(EltSize && "Cannot index into a zero-sized array");
       NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
       Offset %= EltSize;

@@ -8407,7 +8407,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
       // is a single-index GEP.
       if (X->getType() == CI.getType()) {
         // Get the size of the pointee type.
-        uint64_t Size = TD->getABITypeSize(DestPointee);
+        uint64_t Size = TD->getTypePaddedSize(DestPointee);

         // Convert the constant to intptr type.
         APInt Offset = Cst->getValue();

@@ -8427,7 +8427,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
       // "inttoptr+GEP" instead of "add+intptr".

       // Get the size of the pointee type.
-      uint64_t Size = TD->getABITypeSize(DestPointee);
+      uint64_t Size = TD->getTypePaddedSize(DestPointee);

       // Convert the constant to intptr type.
       APInt Offset = Cst->getValue();

@@ -9492,7 +9492,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
   const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
   if (!SrcTy->isSized() || !DstTy->isSized())
     return false;
-  if (TD->getABITypeSize(SrcTy) != TD->getABITypeSize(DstTy))
+  if (TD->getTypePaddedSize(SrcTy) != TD->getTypePaddedSize(DstTy))
     return false;
   return true;
 }

@@ -10608,8 +10608,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
     const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
     if (isa<ArrayType>(SrcElTy) &&
-        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
-        TD->getABITypeSize(ResElTy)) {
+        TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
+        TD->getTypePaddedSize(ResElTy)) {
       Value *Idx[2];
       Idx[0] = Constant::getNullValue(Type::Int32Ty);
       Idx[1] = GEP.getOperand(1);

@@ -10626,7 +10626,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {

     if (isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
       uint64_t ArrayEltSize =
-        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType());
+        TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType());

       // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
       // allow either a mul, shift, or constant here.

@@ -10779,7 +10779,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
   // Note that we only do this for alloca's, because malloc should allocate and
   // return a unique pointer, even for a zero byte allocation.
   if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
-      TD->getABITypeSize(AI.getAllocatedType()) == 0)
+      TD->getTypePaddedSize(AI.getAllocatedType()) == 0)
     return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

   return 0;
@@ -330,7 +330,7 @@ SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
     Value *OpVal = getCastedVersionOf(opcode, *i);
     SCEVHandle Idx = SE->getSCEV(OpVal);

-    uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
+    uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
     if (TypeSize != 1)
       Idx = SE->getMulExpr(Idx,
                            SE->getConstant(ConstantInt::get(UIntPtrTy,

@@ -104,7 +104,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,

     // Otherwise, we have a sequential type like an array or vector.  Multiply
     // the index by the ElementSize.
-    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+    uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
     Offset += Size*OpC->getSExtValue();
   }

@@ -511,7 +511,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
   if (!srcArraySize)
     return false;

-  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
+  uint64_t srcSize = TD.getTypePaddedSize(srcAlloca->getAllocatedType()) *
     srcArraySize->getZExtValue();

   if (cpyLength->getZExtValue() < srcSize)

@@ -526,7 +526,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
     if (!destArraySize)
       return false;

-    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
+    uint64_t destSize = TD.getTypePaddedSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

     if (destSize < srcSize)

@@ -538,7 +538,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
       return false;

     const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
-    uint64_t destSize = TD.getABITypeSize(StructTy);
+    uint64_t destSize = TD.getTypePaddedSize(StructTy);

     if (destSize < srcSize)
       return false;
@ -243,7 +243,7 @@ bool SROA::performScalarRepl(Function &F) {
|
||||
isa<ArrayType>(AI->getAllocatedType())) &&
|
||||
AI->getAllocatedType()->isSized() &&
|
||||
// Do not promote any struct whose size is larger than "128" bytes.
|
||||
TD->getABITypeSize(AI->getAllocatedType()) < SRThreshold &&
|
||||
TD->getTypePaddedSize(AI->getAllocatedType()) < SRThreshold &&
|
||||
// Do not promote any struct into more than "32" separate vars.
|
||||
getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
|
||||
// Check that all of the users of the allocation are capable of being
|
||||
@ -562,7 +562,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
|
||||
|
||||
// If not the whole aggregate, give up.
|
||||
if (Length->getZExtValue() !=
|
||||
TD->getABITypeSize(AI->getType()->getElementType()))
|
||||
TD->getTypePaddedSize(AI->getType()->getElementType()))
|
||||
return MarkUnsafe(Info);
|
||||
|
||||
// We only know about memcpy/memset/memmove.
|
||||
@ -595,8 +595,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
|
||||
// cast a {i32,i32}* to i64* and store through it. This is similar to the
|
||||
// memcpy case and occurs in various "byval" cases and emulated memcpys.
|
||||
if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
|
||||
TD->getABITypeSize(SI->getOperand(0)->getType()) ==
|
||||
TD->getABITypeSize(AI->getType()->getElementType())) {
|
||||
TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
|
||||
TD->getTypePaddedSize(AI->getType()->getElementType())) {
|
||||
Info.isMemCpyDst = true;
|
||||
continue;
|
||||
}
|
||||
@ -607,8 +607,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
|
||||
// cast a {i32,i32}* to i64* and load through it. This is similar to the
|
||||
// memcpy case and occurs in various "byval" cases and emulated memcpys.
|
||||
if (isa<IntegerType>(LI->getType()) &&
|
||||
TD->getABITypeSize(LI->getType()) ==
|
||||
TD->getABITypeSize(AI->getType()->getElementType())) {
|
||||
TD->getTypePaddedSize(LI->getType()) ==
|
||||
TD->getTypePaddedSize(AI->getType()->getElementType())) {
|
||||
Info.isMemCpySrc = true;
|
||||
continue;
|
||||
}
|
||||
@ -789,7 +789,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
|
||||
OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
|
||||
MI);
|
||||
|
||||
unsigned EltSize = TD->getABITypeSize(EltTy);
|
||||
unsigned EltSize = TD->getTypePaddedSize(EltTy);
|
||||
|
||||
// Finally, insert the meminst for this element.
|
||||
if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
|
||||
@@ -823,13 +823,13 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
   // and store the element value to the individual alloca.
   Value *SrcVal = SI->getOperand(0);
   const Type *AllocaEltTy = AI->getType()->getElementType();
-  uint64_t AllocaSizeBits = TD->getABITypeSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
 
   // If this isn't a store of an integer to the whole alloca, it may be a store
   // to the first element. Just ignore the store in this case and normal SROA
   // will handle it.
   if (!isa<IntegerType>(SrcVal->getType()) ||
-      TD->getABITypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
     return;
 
   DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;
@@ -845,7 +845,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
       uint64_t Shift = Layout->getElementOffsetInBits(i);
 
       if (TD->isBigEndian())
-        Shift = AllocaSizeBits-Shift-TD->getABITypeSizeInBits(FieldTy);
+        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);
 
       Value *EltVal = SrcVal;
       if (Shift) {
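The big-endian branch flips the shift because the first field occupies the most significant bits of the wide integer on such targets. A worked instance, with the concrete numbers assumed purely for illustration:

    #include <stdint.h>

    // Example: for a 96-bit (padded) struct, a 32-bit field at bit offset 0
    // must be extracted with shift 96 - 0 - 32 = 64 on a big-endian target,
    // rather than shift 0 as on a little-endian one.
    uint64_t bigEndianShift(uint64_t AllocaSizeBits, uint64_t OffsetBits,
                            uint64_t FieldSizeBits) {
      return AllocaSizeBits - OffsetBits - FieldSizeBits;
    }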
@@ -880,7 +880,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
   } else {
     const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
     const Type *ArrayEltTy = ATy->getElementType();
-    uint64_t ElementOffset = TD->getABITypeSizeInBits(ArrayEltTy);
+    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
     uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
 
     uint64_t Shift;
@@ -935,13 +935,13 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
   // Extract each element out of the NewElts according to its structure offset
   // and form the result value.
   const Type *AllocaEltTy = AI->getType()->getElementType();
-  uint64_t AllocaSizeBits = TD->getABITypeSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
 
   // If this isn't a load of the whole alloca to an integer, it may be a load
   // of the first element. Just ignore the load in this case and normal SROA
   // will handle it.
   if (!isa<IntegerType>(LI->getType()) ||
-      TD->getABITypeSizeInBits(LI->getType()) != AllocaSizeBits)
+      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
     return;
 
   DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;
@@ -954,7 +954,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
     Layout = TD->getStructLayout(EltSTy);
   } else {
     const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
-    ArrayEltBitOffset = TD->getABITypeSizeInBits(ArrayEltTy);
+    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
   }
 
   Value *ResultVal = Constant::getNullValue(LI->getType());
@@ -1048,7 +1048,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
   } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
     return HasPadding(VTy->getElementType(), TD);
   }
-  return TD.getTypeSizeInBits(Ty) != TD.getABITypeSizeInBits(Ty);
+  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
 }
 
 /// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
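The fallthrough return in HasPadding compares a scalar's value bits against the bits its storage reserves. Two illustrative cases, assuming typical x86 layouts: i1 carries 1 value bit but occupies 8 padded bits, and x86_fp80 carries 80 value bits but pads to 96 or 128 depending on alignment. A sketch of the same test in isolation:

    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Sketch of the scalar padding test: any gap between the type's size in
    // bits and its padded size in bits is padding SROA must respect.
    static bool scalarHasPadding(const TargetData &TD, const Type *Ty) {
      return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
    }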
@@ -1270,7 +1270,7 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
       // Check to see if this is stepping over an element: GEP Ptr, int C
       if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
         unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
-        unsigned ElSize = TD->getABITypeSize(PTy->getElementType());
+        unsigned ElSize = TD->getTypePaddedSize(PTy->getElementType());
         unsigned BitOffset = Idx*ElSize*8;
         if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;
 
@@ -1279,7 +1279,7 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
         if (SubElt == 0) return 0;
         if (SubElt != Type::VoidTy && SubElt->isInteger()) {
           const Type *NewTy =
-            getIntAtLeastAsBigAs(TD->getABITypeSizeInBits(SubElt)+BitOffset);
+            getIntAtLeastAsBigAs(TD->getTypePaddedSizeInBits(SubElt)+BitOffset);
           if (NewTy == 0 || MergeInType(NewTy, UsedType, *TD)) return 0;
           continue;
         }
@@ -1320,7 +1320,8 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
       } else {
         return 0;
       }
-      const Type *NTy = getIntAtLeastAsBigAs(TD->getABITypeSizeInBits(AggTy));
+      const Type *NTy =
+        getIntAtLeastAsBigAs(TD->getTypePaddedSizeInBits(AggTy));
       if (NTy == 0 || MergeInType(NTy, UsedType, *TD)) return 0;
       const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
       if (SubTy == 0) return 0;
@@ -1396,7 +1397,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
       const PointerType *AggPtrTy =
         cast<PointerType>(GEP->getOperand(0)->getType());
       unsigned AggSizeInBits =
-        TD->getABITypeSizeInBits(AggPtrTy->getElementType());
+        TD->getTypePaddedSizeInBits(AggPtrTy->getElementType());
 
       // Check to see if this is stepping over an element: GEP Ptr, int C
       unsigned NewOffset = Offset;
@@ -1417,7 +1418,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
         const Type *AggTy = AggPtrTy->getElementType();
         if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
           unsigned ElSizeBits =
-            TD->getABITypeSizeInBits(SeqTy->getElementType());
+            TD->getTypePaddedSizeInBits(SeqTy->getElementType());
 
           NewOffset += ElSizeBits*Idx;
         } else {
@@ -1471,7 +1472,7 @@ Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
       // Otherwise it must be an element access.
       unsigned Elt = 0;
       if (Offset) {
-        unsigned EltSize = TD->getABITypeSizeInBits(VTy->getElementType());
+        unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
         Elt = Offset/EltSize;
         Offset -= EltSize*Elt;
       }
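The quotient/remainder pair above converts a bit offset into a vector lane plus an intra-lane offset. A small worked sketch, with the numbers in the comment assumed for illustration:

    #include <stdint.h>

    // Example: in a <4 x i32> alloca each lane is 32 padded bits, so bit
    // offset 64 selects lane 64/32 = 2 with residual offset 64 - 32*2 = 0.
    void splitBitOffset(uint64_t Offset, uint64_t EltSizeBits,
                        uint64_t &Elt, uint64_t &Rem) {
      Elt = Offset / EltSizeBits;
      Rem = Offset - EltSizeBits * Elt;
    }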
@@ -1557,7 +1558,7 @@ Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
       SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
     } else {
       // Must be an element insertion.
-      unsigned Elt = Offset/TD->getABITypeSizeInBits(PTy->getElementType());
+      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(PTy->getElementType());
       SV = InsertElementInst::Create(Old, SV,
                                      ConstantInt::get(Type::Int32Ty, Elt),
                                      "tmp", SI);
 
@@ -115,7 +115,8 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
       // malloc(type) becomes sbyte *malloc(size)
       Value *MallocArg;
       if (LowerMallocArgToInteger)
-        MallocArg = ConstantInt::get(Type::Int64Ty, TD.getABITypeSize(AllocTy));
+        MallocArg = ConstantInt::get(Type::Int64Ty,
+                                     TD.getTypePaddedSize(AllocTy));
       else
         MallocArg = ConstantExpr::getSizeOf(AllocTy);
       MallocArg = ConstantExpr::getTruncOrBitCast(cast<Constant>(MallocArg),
 
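With LowerMallocArgToInteger set, the pass bakes the byte count into a constant rather than emitting a sizeof-style ConstantExpr, and using the padded size keeps malloc'd objects laid out like array elements of the type. A sketch of that constant (the helper name is hypothetical):

    #include "llvm/Constants.h"
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    // Sketch: the size operand for the lowered malloc(size) call, tail
    // padding included so successive objects keep the array stride.
    static Value *mallocSizeFor(const TargetData &TD, const Type *AllocTy) {
      return ConstantInt::get(Type::Int64Ty, TD.getTypePaddedSize(AllocTy));
    }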
@@ -163,7 +163,7 @@ void CallingConvEmitter::EmitAction(Record *Action,
       O << Size << ", ";
     else
       O << "\n" << IndentStr << " State.getTarget().getTargetData()"
-        "->getABITypeSize(LocVT.getTypeForMVT()), ";
+        "->getTypePaddedSize(LocVT.getTypeForMVT()), ";
     if (Align)
       O << Align;
     else
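CallingConvEmitter is a TableGen backend, so this hunk edits the C++ it prints rather than code that executes in the compiler itself. Assembling the string literals above, the generated stack-allocation snippet plausibly reads as follows (the enclosing State.AllocateStack call is assumed from the surrounding emitter code, which this diff does not show):

    // Plausible shape of the emitted code after this change (assumed
    // context: the emitter wraps these literals in an AllocateStack call).
    unsigned Offset1 = State.AllocateStack(
        State.getTarget().getTargetData()
            ->getTypePaddedSize(LocVT.getTypeForMVT()), Align);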