1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-22 20:43:44 +02:00
llvm-mirror/test/CodeGen/X86/partial-fold64.ll
Michael Kuperstein fb1214dfc3 [X86] Allow folding of stack reloads when loading a subreg of the spilled reg
We did not support subregs in InlineSpiller::foldMemoryOperand() because targets
may not deal with them correctly.

This adds a target hook to let the spiller know that a target can handle
subregs, and actually enables it for x86 for the case of stack slot reloads.
This fixes PR30832.

Differential Revision: https://reviews.llvm.org/D26521

llvm-svn: 287792
2016-11-23 18:33:49 +00:00

42 lines
1.8 KiB
LLVM

; RUN: llc -mtriple=x86_64-unknown-linux-gnu -enable-misched=false < %s | FileCheck %s
; A 64-bit value is spilled to an 8-byte stack slot; the use only needs its
; low 32 bits. The reload should be folded into the subl as a 4-byte read of
; the (sub-register of the) spill slot, not reloaded as a full 64-bit value
; first (PR30832).
define i32 @fold64to32(i64 %add, i32 %spill) {
; CHECK-LABEL: fold64to32:
; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK: subl -{{[0-9]+}}(%rsp), %esi # 4-byte Folded Reload
entry:
; The inline asm clobbers every GPR except rsi, forcing %add (in rdi) to be
; spilled across it while %spill can stay live in esi.
tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
%trunc = trunc i64 %add to i32
%sub = sub i32 %spill, %trunc
ret i32 %sub
}
; Same as fold64to32, but truncating to i8: the reload of the low byte of the
; 8-byte spill slot should be folded directly into the subb.
define i8 @fold64to8(i64 %add, i8 %spill) {
; CHECK-LABEL: fold64to8:
; CHECK: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK: subb -{{[0-9]+}}(%rsp), %sil # 1-byte Folded Reload
entry:
; Clobber every GPR except rsi so %add (in rdi) is spilled across the asm
; while %spill remains live in sil.
tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
%trunc = trunc i64 %add to i8
%sub = sub i8 %spill, %trunc
ret i8 %sub
}
; Negative test: folding must only apply to reloads, not spills. Do not fold a
; 4-byte store into a 8-byte spill slot — the checks require a full 8-byte
; reload, the 32-bit sub in registers, and a separate full 8-byte spill of the
; result.
; CHECK-LABEL: nofold
; CHECK: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; CHECK: subl %edi, %eax
; CHECK: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
define i32 @nofold(i64 %add, i64 %spill) {
entry:
; First asm clobbers all GPRs except rdi, forcing %spill (in rsi) to be
; spilled across it.
tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
%trunc = trunc i64 %add to i32
%truncspill = trunc i64 %spill to i32
%sub = sub i32 %truncspill, %trunc
; Second asm clobbers every GPR, so %sub must also be spilled and reloaded
; before the return.
tail call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{dirflag},~{fpsr},~{flags}"()
ret i32 %sub
}