
[X86] Avoid generating invalid R_X86_64_GOTPCRELX relocations

We need to make sure not to emit R_X86_64_GOTPCRELX relocations for
instructions that use a REX prefix; a 32-bit instruction picks one up, for
example, when it references one of the extended registers r8d-r15d. If a REX
prefix is present, we must instead emit an R_X86_64_REX_GOTPCRELX relocation.
The existing logic for CALL64m, JMP64m, etc. already handles this by checking
the HasREX parameter and using it to choose the relocation type. Do the same
for all instructions that can use relaxed relocations.
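
For reference, a minimal, self-contained sketch of the decision this change
converges on is below. The enum values and the helper name are hypothetical
stand-ins, not LLVM's actual declarations; the real logic lives in
X86MCCodeEmitter::emitMemModRMByte, shown in the diff that follows.

  #include <cassert>

  // Hypothetical model of the fixup-kind choice for RIP-relative memory
  // operands. HasREX reports whether the encoded instruction carries a REX
  // prefix.
  enum FixupKind {
    reloc_riprel_4byte,           // plain RIP-relative, not relaxable
    reloc_riprel_4byte_movq_load, // movq load from the GOT
    reloc_riprel_4byte_relax,     // relaxable, no REX prefix
    reloc_riprel_4byte_relax_rex  // relaxable, REX prefix present
  };

  enum Opcode { MOV64rm, ADC32rm, ADD32rm, /* ... */ CALL64m, JMP64m, OTHER };

  FixupKind pickRIPRelFixupKind(Opcode Op, bool HasREX) {
    switch (Op) {
    default:
      return reloc_riprel_4byte;
    case MOV64rm:
      // movq loads keep a dedicated kind because COFF and Mach-O do not
      // support ELF's more flexible R_X86_64_REX_GOTPCRELX relaxation.
      assert(HasREX);
      return reloc_riprel_4byte_movq_load;
    case ADC32rm:
    case ADD32rm:
    // ... remaining relaxable opcodes ...
    case CALL64m:
    case JMP64m:
      // A REX prefix (for example, one forced by an r8d-r15d operand)
      // requires the REX variant of the relaxable fixup.
      return HasREX ? reloc_riprel_4byte_relax_rex : reloc_riprel_4byte_relax;
    }
  }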

Reviewed By: MaskRay

Differential Revision: https://reviews.llvm.org/D93561
Harald van Dijk 2020-12-18 23:38:38 +00:00
parent 39e945b403
commit 87ab7fb0ff
3 changed files with 71 additions and 43 deletions

@@ -409,6 +409,12 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
switch (Opcode) {
default:
return X86::reloc_riprel_4byte;
case X86::MOV64rm:
// movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
// special case because COFF and Mach-O don't support ELF's more
// flexible R_X86_64_REX_GOTPCRELX relaxation.
assert(HasREX);
return X86::reloc_riprel_4byte_movq_load;
case X86::ADC32rm:
case X86::ADD32rm:
case X86::AND32rm:
@@ -419,13 +425,6 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
case X86::SUB32rm:
case X86::TEST32mr:
case X86::XOR32rm:
return X86::reloc_riprel_4byte_relax;
case X86::MOV64rm:
// movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
// special case because COFF and Mach-O don't support ELF's more
// flexible R_X86_64_REX_GOTPCRELX relaxation.
assert(HasREX);
return X86::reloc_riprel_4byte_movq_load;
case X86::CALL64m:
case X86::JMP64m:
case X86::TAILJMPm64:
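
For context, the fixup kind matters because the ELF object writer later turns
it into a relocation type for @GOTPCREL references. A rough sketch of that
mapping follows, reusing the hypothetical FixupKind enum from the sketch in
the commit message above; the real logic lives in the X86 ELF object writer
and also checks the @GOTPCREL modifier and whether relaxable relocations are
enabled at all (the NORELAX run in the test below falls back to
R_X86_64_GOTPCREL).

  // Rough model of the x86-64 ELF relocation type chosen for a @GOTPCREL
  // reference, given the fixup kind picked above. Names are illustrative.
  enum GotRelocType {
    R_X86_64_GOTPCREL,      // linker will not relax the access
    R_X86_64_GOTPCRELX,     // relaxable, instruction has no REX prefix
    R_X86_64_REX_GOTPCRELX  // relaxable, instruction has a REX prefix
  };

  GotRelocType gotpcrelRelocType(FixupKind Kind) {
    switch (Kind) {
    case reloc_riprel_4byte_relax:
      return R_X86_64_GOTPCRELX;
    case reloc_riprel_4byte_relax_rex:
    case reloc_riprel_4byte_movq_load:
      return R_X86_64_REX_GOTPCRELX;
    default:
      return R_X86_64_GOTPCREL;
    }
  }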

@@ -1,36 +0,0 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux %s -o - | llvm-readobj -r - | FileCheck %s
// these should produce R_X86_64_REX_GOTPCRELX
movq mov@GOTPCREL(%rip), %rax
test %rax, test@GOTPCREL(%rip)
adc adc@GOTPCREL(%rip), %rax
add add@GOTPCREL(%rip), %rax
and and@GOTPCREL(%rip), %rax
cmp cmp@GOTPCREL(%rip), %rax
or or@GOTPCREL(%rip), %rax
sbb sbb@GOTPCREL(%rip), %rax
sub sub@GOTPCREL(%rip), %rax
xor xor@GOTPCREL(%rip), %rax
.section .norelax,"ax"
## This expression loads the GOT entry with an offset.
## Don't emit R_X86_64_REX_GOTPCRELX.
movq mov@GOTPCREL+1(%rip), %rax
// CHECK: Relocations [
// CHECK-NEXT: Section ({{.*}}) .rela.text {
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
// CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
// CHECK-NEXT: }
// CHECK-NEXT: Section ({{.*}}) .rela.norelax {
// CHECK-NEXT: R_X86_64_GOTPCREL mov
// CHECK-NEXT: }

@@ -17,6 +17,26 @@
# CHECK-NEXT: R_X86_64_GOTPCRELX xor
# CHECK-NEXT: R_X86_64_GOTPCRELX call
# CHECK-NEXT: R_X86_64_GOTPCRELX jmp
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
# CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
# CHECK-NEXT: }
# NORELAX-NEXT: R_X86_64_GOTPCREL mov
@@ -31,6 +51,26 @@
# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: R_X86_64_GOTPCREL call
# NORELAX-NEXT: R_X86_64_GOTPCREL jmp
# NORELAX-NEXT: R_X86_64_GOTPCREL mov
# NORELAX-NEXT: R_X86_64_GOTPCREL test
# NORELAX-NEXT: R_X86_64_GOTPCREL adc
# NORELAX-NEXT: R_X86_64_GOTPCREL add
# NORELAX-NEXT: R_X86_64_GOTPCREL and
# NORELAX-NEXT: R_X86_64_GOTPCREL cmp
# NORELAX-NEXT: R_X86_64_GOTPCREL or
# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
# NORELAX-NEXT: R_X86_64_GOTPCREL sub
# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: R_X86_64_GOTPCREL mov
# NORELAX-NEXT: R_X86_64_GOTPCREL test
# NORELAX-NEXT: R_X86_64_GOTPCREL adc
# NORELAX-NEXT: R_X86_64_GOTPCREL add
# NORELAX-NEXT: R_X86_64_GOTPCREL and
# NORELAX-NEXT: R_X86_64_GOTPCREL cmp
# NORELAX-NEXT: R_X86_64_GOTPCREL or
# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
# NORELAX-NEXT: R_X86_64_GOTPCREL sub
# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: }
movl mov@GOTPCREL(%rip), %eax
@@ -46,8 +86,31 @@ xor xor@GOTPCREL(%rip), %eax
call *call@GOTPCREL(%rip)
jmp *jmp@GOTPCREL(%rip)
movl mov@GOTPCREL(%rip), %r8d
test %r8d, test@GOTPCREL(%rip)
adc adc@GOTPCREL(%rip), %r8d
add add@GOTPCREL(%rip), %r8d
and and@GOTPCREL(%rip), %r8d
cmp cmp@GOTPCREL(%rip), %r8d
or or@GOTPCREL(%rip), %r8d
sbb sbb@GOTPCREL(%rip), %r8d
sub sub@GOTPCREL(%rip), %r8d
xor xor@GOTPCREL(%rip), %r8d
movq mov@GOTPCREL(%rip), %rax
test %rax, test@GOTPCREL(%rip)
adc adc@GOTPCREL(%rip), %rax
add add@GOTPCREL(%rip), %rax
and and@GOTPCREL(%rip), %rax
cmp cmp@GOTPCREL(%rip), %rax
or or@GOTPCREL(%rip), %rax
sbb sbb@GOTPCREL(%rip), %rax
sub sub@GOTPCREL(%rip), %rax
xor xor@GOTPCREL(%rip), %rax
# COMMON-NEXT: Section ({{.*}}) .rela.norelax {
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0x0
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFD
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFC
# COMMON-NEXT: }
# COMMON-NEXT: ]
@@ -56,5 +119,7 @@ jmp *jmp@GOTPCREL(%rip)
## Clang may emit this expression to load the high 32-bit of the GOT entry.
## Don't emit R_X86_64_GOTPCRELX.
movl mov@GOTPCREL+4(%rip), %eax
## Don't emit R_X86_64_GOTPCRELX.
movq mov@GOTPCREL+1(%rip), %rax
## We could emit R_X86_64_GOTPCRELX, but it is probably unnecessary.
movl mov@GOTPCREL+0(%rip), %eax