//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
|
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {
|
class RuntimeDyldMachOAArch64
|
|
|
|
: public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
|
|
|
|
public:
|
2014-09-04 04:53:03 +00:00
|
|
|
|
|
|
|
typedef uint64_t TargetPtrT;
|
|
|
|
|
[MCJIT][Orc] Refactor RTDyldMemoryManager, weave RuntimeDyld::SymbolInfo through
MCJIT.
This patch decouples the two responsibilities of the RTDyldMemoryManager class,
memory management and symbol resolution, into two new classes:
RuntimeDyld::MemoryManager and RuntimeDyld::SymbolResolver.
The symbol resolution interface is modified slightly, from:
uint64_t getSymbolAddress(const std::string &Name);
to:
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name);
The latter passes symbol flags along with symbol addresses, allowing RuntimeDyld
and others to reason about non-strong/non-exported symbols.
The memory management interface removes the following method:
void notifyObjectLoaded(ExecutionEngine *EE,
const object::ObjectFile &) {}
as it is not related to memory management. (Note: Backwards compatibility *is*
maintained for this method in MCJIT and OrcMCJITReplacement, see below).
The RTDyldMemoryManager class remains in-tree for backwards compatibility.
It inherits directly from RuntimeDyld::SymbolResolver, and indirectly from
RuntimeDyld::MemoryManager via the new MCJITMemoryManager class, which
just subclasses RuntimeDyld::MemoryManager and reintroduces the
notifyObjectLoaded method for backwards compatibility).
The EngineBuilder class retains the existing method:
EngineBuilder&
setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);
and includes two new methods:
EngineBuilder&
setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
EngineBuilder&
setSymbolResolver(std::unique_ptr<RuntimeDyld::SymbolResolver> SR);
Clients should use EITHER:
A single call to setMCJITMemoryManager with an RTDyldMemoryManager.
OR (exclusive)
One call each to each of setMemoryManager and setSymbolResolver.
This patch should be fully compatible with existing uses of RTDyldMemoryManager.
If it is not it should be considered a bug, and the patch either fixed or
reverted.
If clients find the new API to be an improvement the goal will be to deprecate
and eventually remove the RTDyldMemoryManager class in favor of the new classes.
llvm-svn: 233509
2015-03-30 03:37:06 +00:00
|
|
|
RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
|
2016-08-01 20:49:11 +00:00
|
|
|
JITSymbolResolver &Resolver)
|
[MCJIT][Orc] Refactor RTDyldMemoryManager, weave RuntimeDyld::SymbolInfo through
MCJIT.
This patch decouples the two responsibilities of the RTDyldMemoryManager class,
memory management and symbol resolution, into two new classes:
RuntimeDyld::MemoryManager and RuntimeDyld::SymbolResolver.
The symbol resolution interface is modified slightly, from:
uint64_t getSymbolAddress(const std::string &Name);
to:
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name);
The latter passes symbol flags along with symbol addresses, allowing RuntimeDyld
and others to reason about non-strong/non-exported symbols.
The memory management interface removes the following method:
void notifyObjectLoaded(ExecutionEngine *EE,
const object::ObjectFile &) {}
as it is not related to memory management. (Note: Backwards compatibility *is*
maintained for this method in MCJIT and OrcMCJITReplacement, see below).
The RTDyldMemoryManager class remains in-tree for backwards compatibility.
It inherits directly from RuntimeDyld::SymbolResolver, and indirectly from
RuntimeDyld::MemoryManager via the new MCJITMemoryManager class, which
just subclasses RuntimeDyld::MemoryManager and reintroduces the
notifyObjectLoaded method for backwards compatibility).
The EngineBuilder class retains the existing method:
EngineBuilder&
setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);
and includes two new methods:
EngineBuilder&
setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
EngineBuilder&
setSymbolResolver(std::unique_ptr<RuntimeDyld::SymbolResolver> SR);
Clients should use EITHER:
A single call to setMCJITMemoryManager with an RTDyldMemoryManager.
OR (exclusive)
One call each to each of setMemoryManager and setSymbolResolver.
This patch should be fully compatible with existing uses of RTDyldMemoryManager.
If it is not it should be considered a bug, and the patch either fixed or
reverted.
If clients find the new API to be an improvement the goal will be to deprecate
and eventually remove the RTDyldMemoryManager class in favor of the new classes.
llvm-svn: 233509
2015-03-30 03:37:06 +00:00
|
|
|
: RuntimeDyldMachOCRTPBase(MM, Resolver) {}
|
2014-07-17 18:54:50 +00:00
|
|
|
|
|
|
|
unsigned getMaxStubSize() override { return 8; }
|
|
|
|
|
2014-07-17 23:11:30 +00:00
|
|
|
unsigned getStubAlignment() override { return 8; }
|
2014-07-17 18:54:50 +00:00
|
|
|
|
2014-07-22 21:42:51 +00:00
|
|
|
/// Extract the addend encoded in the instruction / memory location.
|
2014-08-08 23:12:22 +00:00
|
|
|
int64_t decodeAddend(const RelocationEntry &RE) const {
|
|
|
|
const SectionEntry &Section = Sections[RE.SectionID];
|
2015-11-23 21:47:41 +00:00
|
|
|
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
|
2014-08-08 23:12:22 +00:00
|
|
|
unsigned NumBytes = 1 << RE.Size;
|
2014-07-22 21:42:51 +00:00
|
|
|
int64_t Addend = 0;
|
|
|
|
// Verify that the relocation has the correct size and alignment.
|
2014-08-08 23:12:22 +00:00
|
|
|
switch (RE.RelType) {
|
2014-07-22 21:42:51 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unsupported relocation type!");
|
|
|
|
case MachO::ARM64_RELOC_UNSIGNED:
|
2014-07-29 19:57:15 +00:00
|
|
|
assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
|
2014-07-22 21:42:51 +00:00
|
|
|
break;
|
|
|
|
case MachO::ARM64_RELOC_BRANCH26:
|
|
|
|
case MachO::ARM64_RELOC_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_PAGEOFF12:
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
|
|
|
|
assert(NumBytes == 4 && "Invalid relocation size.");
|
|
|
|
assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
|
|
|
|
"Instruction address is not aligned to 4 bytes.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-08-08 23:12:22 +00:00
|
|
|
switch (RE.RelType) {
|
2014-07-22 21:42:51 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unsupported relocation type!");
|
|
|
|
case MachO::ARM64_RELOC_UNSIGNED:
|
2014-07-29 19:57:15 +00:00
|
|
|
// This could be an unaligned memory location.
|
|
|
|
if (NumBytes == 4)
|
|
|
|
Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
|
|
|
|
else
|
|
|
|
Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
|
2014-07-22 21:42:51 +00:00
|
|
|
break;
|
|
|
|
case MachO::ARM64_RELOC_BRANCH26: {
|
|
|
|
// Verify that the relocation points to the expected branch instruction.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:51 +00:00
|
|
|
assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
|
|
|
|
|
|
|
|
// Get the 26 bit addend encoded in the branch instruction and sign-extend
|
|
|
|
// to 64 bit. The lower 2 bits are always zeros and are therefore implicit
|
|
|
|
// (<< 2).
|
|
|
|
Addend = (*p & 0x03FFFFFF) << 2;
|
|
|
|
Addend = SignExtend64(Addend, 28);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_PAGE21: {
|
|
|
|
// Verify that the relocation points to the expected adrp instruction.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:51 +00:00
|
|
|
assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
|
|
|
|
|
|
|
|
// Get the 21 bit addend encoded in the adrp instruction and sign-extend
|
|
|
|
// to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
|
|
|
|
// therefore implicit (<< 12).
|
|
|
|
Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
|
|
|
|
Addend = SignExtend64(Addend, 33);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
|
|
|
|
// Verify that the relocation points to one of the expected load / store
|
|
|
|
// instructions.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 22:02:19 +00:00
|
|
|
(void)p;
|
2014-07-22 21:42:51 +00:00
|
|
|
assert((*p & 0x3B000000) == 0x39000000 &&
|
|
|
|
"Only expected load / store instructions.");
|
2016-08-17 05:10:15 +00:00
|
|
|
LLVM_FALLTHROUGH;
|
|
|
|
}
|
2014-07-22 21:42:51 +00:00
|
|
|
case MachO::ARM64_RELOC_PAGEOFF12: {
|
|
|
|
// Verify that the relocation points to one of the expected load / store
|
|
|
|
// or add / sub instructions.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:51 +00:00
|
|
|
assert((((*p & 0x3B000000) == 0x39000000) ||
|
|
|
|
((*p & 0x11C00000) == 0x11000000) ) &&
|
|
|
|
"Expected load / store or add/sub instruction.");
|
|
|
|
|
|
|
|
// Get the 12 bit addend encoded in the instruction.
|
|
|
|
Addend = (*p & 0x003FFC00) >> 10;
|
|
|
|
|
|
|
|
// Check which instruction we are decoding to obtain the implicit shift
|
|
|
|
// factor of the instruction.
|
|
|
|
int ImplicitShift = 0;
|
|
|
|
if ((*p & 0x3B000000) == 0x39000000) { // << load / store
|
|
|
|
// For load / store instructions the size is encoded in bits 31:30.
|
|
|
|
ImplicitShift = ((*p >> 30) & 0x3);
|
|
|
|
if (ImplicitShift == 0) {
|
|
|
|
// Check if this a vector op to get the correct shift value.
|
|
|
|
if ((*p & 0x04800000) == 0x04800000)
|
|
|
|
ImplicitShift = 4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Compensate for implicit shift.
|
|
|
|
Addend <<= ImplicitShift;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Addend;
|
|
|
|
}
|
|
|
|
|
2014-07-22 21:42:55 +00:00
|
|
|
/// Extract the addend encoded in the instruction.
|
2014-07-29 19:57:15 +00:00
|
|
|
void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
|
|
|
|
MachO::RelocationInfoType RelType, int64_t Addend) const {
|
2014-07-22 21:42:55 +00:00
|
|
|
// Verify that the relocation has the correct alignment.
|
|
|
|
switch (RelType) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unsupported relocation type!");
|
|
|
|
case MachO::ARM64_RELOC_UNSIGNED:
|
2014-07-29 19:57:15 +00:00
|
|
|
assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
|
|
|
|
break;
|
2014-07-22 21:42:55 +00:00
|
|
|
case MachO::ARM64_RELOC_BRANCH26:
|
|
|
|
case MachO::ARM64_RELOC_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_PAGEOFF12:
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
|
2014-07-29 19:57:15 +00:00
|
|
|
assert(NumBytes == 4 && "Invalid relocation size.");
|
2014-07-22 21:42:55 +00:00
|
|
|
assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
|
|
|
|
"Instruction address is not aligned to 4 bytes.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (RelType) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unsupported relocation type!");
|
2014-07-29 19:57:15 +00:00
|
|
|
case MachO::ARM64_RELOC_UNSIGNED:
|
|
|
|
// This could be an unaligned memory location.
|
|
|
|
if (NumBytes == 4)
|
|
|
|
*reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
|
|
|
|
else
|
|
|
|
*reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
|
|
|
|
break;
|
2014-07-22 21:42:55 +00:00
|
|
|
case MachO::ARM64_RELOC_BRANCH26: {
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:55 +00:00
|
|
|
// Verify that the relocation points to the expected branch instruction.
|
|
|
|
assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
|
|
|
|
|
|
|
|
// Verify addend value.
|
|
|
|
assert((Addend & 0x3) == 0 && "Branch target is not aligned");
|
|
|
|
assert(isInt<28>(Addend) && "Branch target is out of range.");
|
|
|
|
|
|
|
|
// Encode the addend as 26 bit immediate in the branch instruction.
|
|
|
|
*p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_PAGE21: {
|
|
|
|
// Verify that the relocation points to the expected adrp instruction.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:55 +00:00
|
|
|
assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
|
|
|
|
|
|
|
|
// Check that the addend fits into 21 bits (+ 12 lower bits).
|
|
|
|
assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
|
|
|
|
assert(isInt<33>(Addend) && "Invalid page reloc value.");
|
|
|
|
|
|
|
|
// Encode the addend into the instruction.
|
2015-01-10 00:46:38 +00:00
|
|
|
uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
|
|
|
|
uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
|
2014-07-22 21:42:55 +00:00
|
|
|
*p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
|
|
|
|
// Verify that the relocation points to one of the expected load / store
|
|
|
|
// instructions.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:55 +00:00
|
|
|
assert((*p & 0x3B000000) == 0x39000000 &&
|
|
|
|
"Only expected load / store instructions.");
|
2014-07-23 00:17:44 +00:00
|
|
|
(void)p;
|
2016-08-17 05:10:15 +00:00
|
|
|
LLVM_FALLTHROUGH;
|
|
|
|
}
|
2014-07-22 21:42:55 +00:00
|
|
|
case MachO::ARM64_RELOC_PAGEOFF12: {
|
|
|
|
// Verify that the relocation points to one of the expected load / store
|
|
|
|
// or add / sub instructions.
|
2014-07-29 19:57:15 +00:00
|
|
|
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
|
2014-07-22 21:42:55 +00:00
|
|
|
assert((((*p & 0x3B000000) == 0x39000000) ||
|
|
|
|
((*p & 0x11C00000) == 0x11000000) ) &&
|
|
|
|
"Expected load / store or add/sub instruction.");
|
|
|
|
|
|
|
|
// Check which instruction we are decoding to obtain the implicit shift
|
|
|
|
// factor of the instruction and verify alignment.
|
|
|
|
int ImplicitShift = 0;
|
|
|
|
if ((*p & 0x3B000000) == 0x39000000) { // << load / store
|
|
|
|
// For load / store instructions the size is encoded in bits 31:30.
|
|
|
|
ImplicitShift = ((*p >> 30) & 0x3);
|
|
|
|
switch (ImplicitShift) {
|
|
|
|
case 0:
|
|
|
|
// Check if this a vector op to get the correct shift value.
|
|
|
|
if ((*p & 0x04800000) == 0x04800000) {
|
|
|
|
ImplicitShift = 4;
|
|
|
|
assert(((Addend & 0xF) == 0) &&
|
|
|
|
"128-bit LDR/STR not 16-byte aligned.");
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Compensate for implicit shift.
|
|
|
|
Addend >>= ImplicitShift;
|
|
|
|
assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
|
|
|
|
|
|
|
|
// Encode the addend into the instruction.
|
|
|
|
*p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-27 20:24:48 +00:00
|
|
|
Expected<relocation_iterator>
|
2014-07-17 18:54:50 +00:00
|
|
|
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
|
2014-11-26 16:54:40 +00:00
|
|
|
const ObjectFile &BaseObjT,
|
|
|
|
ObjSectionToIDMap &ObjSectionToID,
|
2014-11-27 05:40:13 +00:00
|
|
|
StubMap &Stubs) override {
|
2014-07-17 18:54:50 +00:00
|
|
|
const MachOObjectFile &Obj =
|
2014-11-26 16:54:40 +00:00
|
|
|
static_cast<const MachOObjectFile &>(BaseObjT);
|
2014-07-17 18:54:50 +00:00
|
|
|
MachO::any_relocation_info RelInfo =
|
|
|
|
Obj.getRelocation(RelI->getRawDataRefImpl());
|
|
|
|
|
2016-04-27 20:24:48 +00:00
|
|
|
if (Obj.isRelocationScattered(RelInfo))
|
|
|
|
return make_error<RuntimeDyldError>("Scattered relocations not supported "
|
|
|
|
"for MachO AArch64");
|
2014-07-17 18:54:50 +00:00
|
|
|
|
|
|
|
// ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
|
|
|
|
// addend for the following relocation. If found: (1) store the associated
|
|
|
|
// addend, (2) consume the next relocation, and (3) use the stored addend to
|
|
|
|
// override the addend.
|
|
|
|
int64_t ExplicitAddend = 0;
|
|
|
|
if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
|
|
|
|
assert(!Obj.getPlainRelocationExternal(RelInfo));
|
|
|
|
assert(!Obj.getAnyRelocationPCRel(RelInfo));
|
|
|
|
assert(Obj.getAnyRelocationLength(RelInfo) == 2);
|
|
|
|
int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
|
|
|
|
// Sign-extend the 24-bit to 64-bit.
|
2014-07-22 21:42:49 +00:00
|
|
|
ExplicitAddend = SignExtend64(RawAddend, 24);
|
2014-07-17 18:54:50 +00:00
|
|
|
++RelI;
|
|
|
|
RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
|
|
|
|
}
|
|
|
|
|
2016-01-21 21:59:50 +00:00
|
|
|
if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
|
|
|
|
return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
|
|
|
|
|
2014-11-26 16:54:40 +00:00
|
|
|
RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
|
2014-08-08 23:12:22 +00:00
|
|
|
RE.Addend = decodeAddend(RE);
|
2014-07-17 18:54:50 +00:00
|
|
|
|
2014-07-22 21:42:49 +00:00
|
|
|
assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
|
|
|
|
"ARM64_RELOC_ADDEND and embedded addend in the instruction.");
|
2015-08-11 06:27:53 +00:00
|
|
|
if (ExplicitAddend)
|
2014-07-18 20:29:36 +00:00
|
|
|
RE.Addend = ExplicitAddend;
|
2015-08-11 06:27:53 +00:00
|
|
|
|
2016-04-27 20:24:48 +00:00
|
|
|
RelocationValueRef Value;
|
|
|
|
if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
|
|
|
|
Value = *ValueOrErr;
|
|
|
|
else
|
|
|
|
return ValueOrErr.takeError();
|
2014-07-17 18:54:50 +00:00
|
|
|
|
|
|
|
bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
|
|
|
|
if (!IsExtern && RE.IsPCRel)
|
Remove getRelocationAddress.
Originally added in r139314.
Back then it didn't actually get the address, it got whatever value the
relocation used: address or offset.
The values in different object formats are:
* MachO: Always an offset.
* COFF: Always an address, but when talking about the virtual address of
sections it says: "for simplicity, compilers should set this to zero".
* ELF: An offset for .o files and and address for .so files. In the case of the
.so, the relocation in not linked to any section (sh_info is 0). We can't
really compute an offset.
Some API mappings would be:
* Use getAddress for everything. It would be quite cumbersome. To compute the
address elf has to follow sh_info, which can be corrupted and therefore the
method has to return an ErrorOr. The address of the section is also the same
for every relocation in a section, so we shouldn't have to check the error
and fetch the value for every relocation.
* Use a getValue and make it up to the user to know what it is getting.
* Use a getOffset and:
* Assert for dynamic ELF objects. That is a very peculiar case and it is
probably fair to ask any tool that wants to support it to use ELF.h. The
only tool we have that reads those (llvm-readobj) already does that. The
only other use case I can think of is a dynamic linker.
* Check that COFF .obj files have sections with zero virtual address spaces. If
it turns out that some assembler/compiler produces these, we can change
COFFObjectFile::getRelocationOffset to subtract it. Given COFF format,
this can be done without the need for ErrorOr.
The getRelocationAddress method was never implemented for COFF. It also
had exactly one use in a very peculiar case: a shortcut for adding the
section value to a pcrel reloc on MachO.
Given that, I don't expect that there is any use out there of the C API. If
that is not the case, let me know and I will add it back with the implementation
inlined and do a proper deprecation.
llvm-svn: 241450
2015-07-06 14:55:37 +00:00
|
|
|
makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
|
2014-07-17 18:54:50 +00:00
|
|
|
|
2014-09-07 04:03:32 +00:00
|
|
|
RE.Addend = Value.Offset;
|
2014-07-17 18:54:50 +00:00
|
|
|
|
|
|
|
if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
|
|
|
|
RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
|
|
|
|
processGOTRelocation(RE, Value, Stubs);
|
|
|
|
else {
|
|
|
|
if (Value.SymbolName)
|
|
|
|
addRelocationForSymbol(RE, Value.SymbolName);
|
|
|
|
else
|
|
|
|
addRelocationForSection(RE, Value.SectionID);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ++RelI;
|
|
|
|
}
|
|
|
|
|
2014-09-03 11:41:21 +00:00
|
|
|
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
|
2014-07-17 18:54:50 +00:00
|
|
|
DEBUG(dumpRelocationToResolve(RE, Value));
|
|
|
|
|
|
|
|
const SectionEntry &Section = Sections[RE.SectionID];
|
2015-11-23 21:47:41 +00:00
|
|
|
uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
|
2014-07-29 19:57:11 +00:00
|
|
|
MachO::RelocationInfoType RelType =
|
|
|
|
static_cast<MachO::RelocationInfoType>(RE.RelType);
|
2014-07-17 18:54:50 +00:00
|
|
|
|
2014-07-29 19:57:11 +00:00
|
|
|
switch (RelType) {
|
2014-07-17 18:54:50 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Invalid relocation type!");
|
|
|
|
case MachO::ARM64_RELOC_UNSIGNED: {
|
|
|
|
assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
|
|
|
|
// Mask in the target value a byte at a time (we don't have an alignment
|
|
|
|
// guarantee for the target address, so this is safest).
|
|
|
|
if (RE.Size < 2)
|
|
|
|
llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
|
|
|
|
|
2014-07-29 19:57:15 +00:00
|
|
|
encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
|
2014-07-17 18:54:50 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_BRANCH26: {
|
|
|
|
assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
|
|
|
|
// Check if branch is in range.
|
2015-11-23 21:47:41 +00:00
|
|
|
uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
|
2014-07-22 21:42:55 +00:00
|
|
|
int64_t PCRelVal = Value - FinalAddress + RE.Addend;
|
2014-07-29 19:57:15 +00:00
|
|
|
encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
|
2014-07-17 18:54:50 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_PAGE21: {
|
|
|
|
assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
|
|
|
|
// Adjust for PC-relative relocation and offset.
|
2015-11-23 21:47:41 +00:00
|
|
|
uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
|
2014-07-22 21:42:55 +00:00
|
|
|
int64_t PCRelVal =
|
|
|
|
((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
|
2014-07-29 19:57:15 +00:00
|
|
|
encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
|
2014-07-17 18:54:50 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
|
|
|
|
case MachO::ARM64_RELOC_PAGEOFF12: {
|
|
|
|
assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
|
|
|
|
// Add the offset from the symbol.
|
|
|
|
Value += RE.Addend;
|
|
|
|
// Mask out the page address and only use the lower 12 bits.
|
|
|
|
Value &= 0xFFF;
|
2014-07-29 19:57:15 +00:00
|
|
|
encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
|
2014-07-17 18:54:50 +00:00
|
|
|
break;
|
|
|
|
}
|
2016-01-21 21:59:50 +00:00
|
|
|
case MachO::ARM64_RELOC_SUBTRACTOR: {
|
|
|
|
uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
|
|
|
|
uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
|
|
|
|
assert((Value == SectionABase || Value == SectionBBase) &&
|
|
|
|
"Unexpected SUBTRACTOR relocation value.");
|
|
|
|
Value = SectionABase - SectionBBase + RE.Addend;
|
|
|
|
writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
|
|
|
|
break;
|
|
|
|
}
|
2014-07-17 18:54:50 +00:00
|
|
|
case MachO::ARM64_RELOC_POINTER_TO_GOT:
|
|
|
|
case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
|
|
|
|
case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
|
2014-07-22 21:42:55 +00:00
|
|
|
llvm_unreachable("Relocation type not yet implemented!");
|
2014-07-17 18:54:50 +00:00
|
|
|
case MachO::ARM64_RELOC_ADDEND:
|
|
|
|
llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
|
|
|
|
"processRelocationRef!");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-27 20:24:48 +00:00
|
|
|
Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
|
|
|
|
const SectionRef &Section) {
|
|
|
|
return Error::success();
|
|
|
|
}
|
2014-07-17 18:54:50 +00:00
|
|
|
|
|
|
|
private:
|
|
|
|
void processGOTRelocation(const RelocationEntry &RE,
|
|
|
|
RelocationValueRef &Value, StubMap &Stubs) {
|
|
|
|
assert(RE.Size == 2);
|
|
|
|
SectionEntry &Section = Sections[RE.SectionID];
|
|
|
|
StubMap::const_iterator i = Stubs.find(Value);
|
2014-10-21 23:41:15 +00:00
|
|
|
int64_t Offset;
|
2014-07-17 18:54:50 +00:00
|
|
|
if (i != Stubs.end())
|
2014-10-21 23:41:15 +00:00
|
|
|
Offset = static_cast<int64_t>(i->second);
|
2014-07-17 18:54:50 +00:00
|
|
|
else {
|
|
|
|
// FIXME: There must be a better way to do this then to check and fix the
|
|
|
|
// alignment every time!!!
|
2015-11-23 21:47:41 +00:00
|
|
|
uintptr_t BaseAddress = uintptr_t(Section.getAddress());
|
2014-07-17 18:54:50 +00:00
|
|
|
uintptr_t StubAlignment = getStubAlignment();
|
|
|
|
uintptr_t StubAddress =
|
2015-11-23 21:47:41 +00:00
|
|
|
(BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
|
2014-07-17 18:54:50 +00:00
|
|
|
-StubAlignment;
|
|
|
|
unsigned StubOffset = StubAddress - BaseAddress;
|
|
|
|
Stubs[Value] = StubOffset;
|
|
|
|
assert(((StubAddress % getStubAlignment()) == 0) &&
|
|
|
|
"GOT entry not aligned");
|
|
|
|
RelocationEntry GOTRE(RE.SectionID, StubOffset,
|
2014-09-07 04:03:32 +00:00
|
|
|
MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
|
2014-07-17 18:54:50 +00:00
|
|
|
/*IsPCRel=*/false, /*Size=*/3);
|
|
|
|
if (Value.SymbolName)
|
|
|
|
addRelocationForSymbol(GOTRE, Value.SymbolName);
|
|
|
|
else
|
|
|
|
addRelocationForSection(GOTRE, Value.SectionID);
|
2015-11-23 21:47:41 +00:00
|
|
|
Section.advanceStubOffset(getMaxStubSize());
|
2014-10-21 23:41:15 +00:00
|
|
|
Offset = static_cast<int64_t>(StubOffset);
|
2014-07-17 18:54:50 +00:00
|
|
|
}
|
2014-10-21 23:41:15 +00:00
|
|
|
RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
|
2014-07-17 18:54:50 +00:00
|
|
|
RE.IsPCRel, RE.Size);
|
2014-10-21 23:41:15 +00:00
|
|
|
addRelocationForSection(TargetRE, RE.SectionID);
|
2014-07-17 18:54:50 +00:00
|
|
|
}
|
2016-01-21 21:59:50 +00:00
|
|
|
|
2016-05-18 05:31:24 +00:00
|
|
|
Expected<relocation_iterator>
|
2016-01-21 21:59:50 +00:00
|
|
|
processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
|
|
|
|
const ObjectFile &BaseObjT,
|
|
|
|
ObjSectionToIDMap &ObjSectionToID) {
|
|
|
|
const MachOObjectFile &Obj =
|
|
|
|
static_cast<const MachOObjectFile&>(BaseObjT);
|
|
|
|
MachO::any_relocation_info RE =
|
|
|
|
Obj.getRelocation(RelI->getRawDataRefImpl());
|
|
|
|
|
|
|
|
unsigned Size = Obj.getAnyRelocationLength(RE);
|
|
|
|
uint64_t Offset = RelI->getOffset();
|
|
|
|
uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
|
|
|
|
unsigned NumBytes = 1 << Size;
|
|
|
|
|
2016-04-20 21:24:34 +00:00
|
|
|
Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
|
2016-05-18 05:31:24 +00:00
|
|
|
if (!SubtrahendNameOrErr)
|
|
|
|
return SubtrahendNameOrErr.takeError();
|
2016-01-21 21:59:50 +00:00
|
|
|
auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
|
|
|
|
unsigned SectionBID = SubtrahendI->second.getSectionID();
|
|
|
|
uint64_t SectionBOffset = SubtrahendI->second.getOffset();
|
|
|
|
int64_t Addend =
|
|
|
|
SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
|
|
|
|
|
|
|
|
++RelI;
|
2016-04-20 21:24:34 +00:00
|
|
|
Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
|
2016-05-18 05:31:24 +00:00
|
|
|
if (!MinuendNameOrErr)
|
|
|
|
return MinuendNameOrErr.takeError();
|
2016-01-21 21:59:50 +00:00
|
|
|
auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
|
|
|
|
unsigned SectionAID = MinuendI->second.getSectionID();
|
|
|
|
uint64_t SectionAOffset = MinuendI->second.getOffset();
|
|
|
|
|
|
|
|
RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR, (uint64_t)Addend,
|
|
|
|
SectionAID, SectionAOffset, SectionBID, SectionBOffset,
|
|
|
|
false, Size);
|
|
|
|
|
|
|
|
addRelocationForSection(R, SectionAID);
|
|
|
|
|
|
|
|
return ++RelI;
|
|
|
|
}
|
|
|
|
|
2014-07-17 18:54:50 +00:00
|
|
|
};
|
} // end namespace llvm

#undef DEBUG_TYPE

#endif