Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2025-02-01 05:01:59 +01:00.
Commit c5dcb602f8: Move store merge to happen after intrinsic lowering to allow lowered stores to be merged. Some regressions due in MergeConsecutiveStores to missing insert_subvector that are addressed in a follow-up patch. Reviewers: craig.topper, efriedma, RKSimon. Subscribers: llvm-commits. Differential Revision: https://reviews.llvm.org/D34559. llvm-svn: 310710.
File: 23 lines, 769 B, LLVM.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,-slow-unaligned-mem-32 | FileCheck %s --check-prefix=FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=SLOW
; Returning an i256 is lowered as a store through the sret pointer (%rdi).
; With fast unaligned 32-byte memory, the two 16-byte halves of the <8 x i32>
; store are merged into a single 32-byte vmovups; with +slow-unaligned-mem-32
; the store stays split into a vextractf128 + vmovups pair.
define i256 @foo(<8 x i32> %a) {
; FAST-LABEL: foo:
; FAST:       # BB#0:
; FAST-NEXT:    vmovups %ymm0, (%rdi)
; FAST-NEXT:    movq %rdi, %rax
; FAST-NEXT:    vzeroupper
; FAST-NEXT:    retq
;
; SLOW-LABEL: foo:
; SLOW:       # BB#0:
; SLOW-NEXT:    vextractf128 $1, %ymm0, 16(%rdi)
; SLOW-NEXT:    vmovups %xmm0, (%rdi)
; SLOW-NEXT:    movq %rdi, %rax
; SLOW-NEXT:    vzeroupper
; SLOW-NEXT:    retq
  %r = bitcast <8 x i32> %a to i256
  ret i256 %r
}