llvm-mirror/test/CodeGen/X86/vmovq.ll
Sanjay Patel 6f5f66da62 [x86] favor vector constant load to avoid GPR to XMM transfer
This build vector lowering pattern came up in D79886.
I've tried to limit the improvement to cases where it looks
clearly better to load, but we could remove the 'TODO'
predicates already if we are willing to overlook some
corner cases.

Differential Revision: https://reviews.llvm.org/D80013
2020-05-17 11:56:26 -04:00
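For illustration only (a paraphrase, not part of the commit message): before this change, a vector constant such as <2 x i64> <i64 1, i64 0> used as a build-vector operand could be materialized in a general-purpose register and transferred into an XMM register; with the change, the constant is placed in the constant pool and folded into the consuming instruction as a memory operand, which is what the CHECK lines in this test expect. A rough sketch of the SSE codegen difference (label names illustrative):

  ; old: materialize the scalar in a GPR, then transfer to XMM
  movl $1, %eax
  movq %rax, %xmm1
  por  %xmm1, %xmm0

  ; new: load the vector constant directly from the constant pool
  por  .LCPI0_0(%rip), %xmm0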

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
define <2 x i64> @PR25554(<2 x i64> %v0, <2 x i64> %v1) {
; SSE-LABEL: PR25554:
; SSE:       # %bb.0:
; SSE-NEXT:    por {{.*}}(%rip), %xmm0
; SSE-NEXT:    paddq {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR25554:
; AVX:       # %bb.0:
; AVX-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %c1 = or <2 x i64> %v0, <i64 1, i64 0>
  %c2 = add <2 x i64> %c1, <i64 0, i64 1>
  ret <2 x i64> %c2
}
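
The CHECK lines above are generated by utils/update_llc_test_checks.py (see the NOTE at the top of the file). If codegen for this test changes, they are typically regenerated with something like the following, where the build directory path is illustrative:

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc llvm/test/CodeGen/X86/vmovq.ll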