mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-23 21:13:02 +02:00
a42dec166e
The x86_64 ABI requires that the stack is 16 byte aligned on function calls. Thus, the 8-byte error code, which is pushed by the CPU for certain exceptions, leads to a misaligned stack. This results in bugs such as Bug 26413, where misaligned movaps instructions are generated. This commit fixes the misalignment by adjusting the stack pointer in these cases. The adjustment is done at the beginning of the prologue generation by subtracting another 8 bytes from the stack pointer. These additional bytes are popped again in the function epilogue. Fixes Bug 26413 Patch by Philipp Oppermann. Differential Revision: https://reviews.llvm.org/D30049 llvm-svn: 299383
21 lines
819 B
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; Compile for x86-64 with SSE explicitly disabled (-mattr=-sse) and verify the
; emitted assembly against the CHECK lines below.
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=-sse < %s | FileCheck %s
|
|
|
|
; Interrupt frame the CPU pushes for an x86_intrcc handler; five i64 fields
; (presumably rip, cs, rflags, rsp, ss — TODO confirm against the x86 ISR ABI).
%struct.interrupt_frame = type { i64, i64, i64, i64, i64 }
|
|
|
|
; Pin the ISR in @llvm.used so it is not dropped as unreferenced before codegen.
@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.interrupt_frame*, i64)* @test_isr_sse_clobbers to i8*)], section "llvm.metadata"
|
|
|
|
; Clobbered SSE must not be saved when the target doesn't support SSE
|
|
; x86 interrupt handler taking an error-code argument (%ecode). With -sse the
; inline-asm xmm clobbers below must produce no xmm save/restore code.
; The CHECK lines also pin the stack-alignment fix from D30049: the CPU-pushed
; 8-byte error code misaligns the 16-byte-aligned stack, so the prologue pushes
; an extra 8 bytes (pushq %rax) and the epilogue pops pad + error code together
; (addq $16, %rsp) before iretq.
define x86_intrcc void @test_isr_sse_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) {
|
|
; CHECK-LABEL: test_isr_sse_clobbers:
|
|
; CHECK: # BB#0:
|
|
; CHECK-NEXT:    pushq %rax
|
|
; CHECK-NEXT:    cld
|
|
; CHECK-NEXT:    #APP
|
|
; CHECK-NEXT:    #NO_APP
|
|
; CHECK-NEXT:    addq $16, %rsp
|
|
; CHECK-NEXT:    iretq
|
|
  ; Clobber two SSE registers; no spill expected because SSE is disabled.
  call void asm sideeffect "", "~{xmm0},~{xmm6}"()
|
|
  ret void
|
|
}
|