[X86][SSE] Added nontemporal load tests
These currently all lower to regular loads; generic nontemporal load support will be added in a future patch.

llvm-svn: 271659
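For reference, the pattern exercised throughout the new test file is an ordinary vector load tagged with !nontemporal metadata; a minimal sketch (hypothetical function name, same shape as every test below):

  define <4 x float> @nt_load(<4 x float>* %src) {
    %v = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
    ret <4 x float> %v
  }
  !1 = !{i32 1}

Until dedicated lowering is added, llc selects a plain (v)movaps for this.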
This commit is contained in:
parent d549af1cc6
commit 42c22dd5cc
test/CodeGen/X86/nontemporal-loads.ll | 776 lines (new file)
@@ -0,0 +1,776 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=VLX

; FIXME: Tests for nontemporal load support which was introduced in SSE41

define <4 x float> @test_v4f32(<4 x float>* %src) {
; SSE-LABEL: test_v4f32:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v4f32:
; VLX: # BB#0:
; VLX-NEXT: vmovaps (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  ret <4 x float> %1
}
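
; NOTE (illustrative; not part of the autogenerated checks): once generic
; nontemporal load lowering lands, SSE41+ targets could instead select the
; dedicated nontemporal load instruction here, e.g.:
;   movntdqa (%rdi), %xmm0
; MOVNTDQA was introduced in SSE4.1 and requires an aligned memory operand.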

define <4 x i32> @test_v4i32(<4 x i32>* %src) {
; SSE-LABEL: test_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v4i32:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa32 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_v2f64(<2 x double>* %src) {
; SSE-LABEL: test_v2f64:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v2f64:
; VLX: # BB#0:
; VLX-NEXT: vmovapd (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_v2i64(<2 x i64>* %src) {
; SSE-LABEL: test_v2i64:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v2i64:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_v8i16(<8 x i16>* %src) {
; SSE-LABEL: test_v8i16:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v8i16:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  ret <8 x i16> %1
}

define <16 x i8> @test_v16i8(<16 x i8>* %src) {
; SSE-LABEL: test_v16i8:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v16i8:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.

define <8 x float> @test_v8f32(<8 x float>* %src) {
; SSE-LABEL: test_v8f32:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f32:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v8f32:
; VLX: # BB#0:
; VLX-NEXT: vmovaps (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_v8i32(<8 x i32>* %src) {
; SSE-LABEL: test_v8i32:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i32:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v8i32:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa32 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_v4f64(<4 x double>* %src) {
; SSE-LABEL: test_v4f64:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v4f64:
; VLX: # BB#0:
; VLX-NEXT: vmovapd (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_v4i64(<4 x i64>* %src) {
; SSE-LABEL: test_v4i64:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4i64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v4i64:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_v16i16(<16 x i16>* %src) {
; SSE-LABEL: test_v16i16:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i16:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v16i16:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  ret <16 x i16> %1
}

define <32 x i8> @test_v32i8(<32 x i8>* %src) {
; SSE-LABEL: test_v32i8:
; SSE: # BB#0:
; SSE-NEXT: movaps (%rdi), %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_v32i8:
; AVX: # BB#0:
; AVX-NEXT: vmovaps (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_v32i8:
; VLX: # BB#0:
; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  ret <32 x i8> %1
}

; Check cases where the load would be folded.
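; NOTE (illustrative; not part of the autogenerated checks): folding the load
; into the arithmetic instruction (e.g. addps (%rdi), %xmm0 below) discards
; the nontemporal hint, so a future lowering would need to keep the
; nontemporal load as a separate instruction to honor it.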

define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
; SSE-LABEL: test_arg_v4f32:
; SSE: # BB#0:
; SSE-NEXT: addps (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vaddps (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4f32:
; VLX: # BB#0:
; VLX-NEXT: vaddps (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  %2 = fadd <4 x float> %arg, %1
  ret <4 x float> %2
}

define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
; SSE-LABEL: test_arg_v4i32:
; SSE: # BB#0:
; SSE-NEXT: paddd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vpaddd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4i32:
; VLX: # BB#0:
; VLX-NEXT: vpaddd (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  %2 = add <4 x i32> %arg, %1
  ret <4 x i32> %2
}

define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
; SSE-LABEL: test_arg_v2f64:
; SSE: # BB#0:
; SSE-NEXT: addpd (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vaddpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v2f64:
; VLX: # BB#0:
; VLX-NEXT: vaddpd (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  %2 = fadd <2 x double> %arg, %1
  ret <2 x double> %2
}

define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
; SSE-LABEL: test_arg_v2i64:
; SSE: # BB#0:
; SSE-NEXT: paddq (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vpaddq (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v2i64:
; VLX: # BB#0:
; VLX-NEXT: vpaddq (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  %2 = add <2 x i64> %arg, %1
  ret <2 x i64> %2
}

define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
; SSE-LABEL: test_arg_v8i16:
; SSE: # BB#0:
; SSE-NEXT: paddw (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vpaddw (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v8i16:
; VLX: # BB#0:
; VLX-NEXT: vpaddw (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  %2 = add <8 x i16> %arg, %1
  ret <8 x i16> %2
}

define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
; SSE-LABEL: test_arg_v16i8:
; SSE: # BB#0:
; SSE-NEXT: paddb (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v16i8:
; VLX: # BB#0:
; VLX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
; VLX-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  %2 = add <16 x i8> %arg, %1
  ret <16 x i8> %2
}

; And now YMM versions.

define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; SSE-LABEL: test_arg_v8f32:
; SSE: # BB#0:
; SSE-NEXT: addps (%rdi), %xmm0
; SSE-NEXT: addps 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v8f32:
; AVX: # BB#0:
; AVX-NEXT: vaddps (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v8f32:
; VLX: # BB#0:
; VLX-NEXT: vaddps (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  %2 = fadd <8 x float> %arg, %1
  ret <8 x float> %2
}

define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; SSE-LABEL: test_arg_v8i32:
; SSE: # BB#0:
; SSE-NEXT: paddd (%rdi), %xmm0
; SSE-NEXT: paddd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i32:
; AVX1: # BB#0:
; AVX1-NEXT: vmovaps (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i32:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; VLX-LABEL: test_arg_v8i32:
; VLX: # BB#0:
; VLX-NEXT: vpaddd (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  %2 = add <8 x i32> %arg, %1
  ret <8 x i32> %2
}

define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; SSE-LABEL: test_arg_v4f64:
; SSE: # BB#0:
; SSE-NEXT: addpd (%rdi), %xmm0
; SSE-NEXT: addpd 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_arg_v4f64:
; AVX: # BB#0:
; AVX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_arg_v4f64:
; VLX: # BB#0:
; VLX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  %2 = fadd <4 x double> %arg, %1
  ret <4 x double> %2
}

define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; SSE-LABEL: test_arg_v4i64:
; SSE: # BB#0:
; SSE-NEXT: paddq (%rdi), %xmm0
; SSE-NEXT: paddq 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_arg_v4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovaps (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddq (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; VLX-LABEL: test_arg_v4i64:
; VLX: # BB#0:
; VLX-NEXT: vpaddq (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  %2 = add <4 x i64> %arg, %1
  ret <4 x i64> %2
}

define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; SSE-LABEL: test_arg_v16i16:
; SSE: # BB#0:
; SSE-NEXT: paddw (%rdi), %xmm0
; SSE-NEXT: paddw 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vmovaps (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i16:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddw (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; VLX-LABEL: test_arg_v16i16:
; VLX: # BB#0:
; VLX-NEXT: vpaddw (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  %2 = add <16 x i16> %arg, %1
  ret <16 x i16> %2
}

define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; SSE-LABEL: test_arg_v32i8:
; SSE: # BB#0:
; SSE-NEXT: paddb (%rdi), %xmm0
; SSE-NEXT: paddb 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vmovaps (%rdi), %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i8:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddb (%rdi), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; VLX-LABEL: test_arg_v32i8:
; VLX: # BB#0:
; VLX-NEXT: vpaddb (%rdi), %ymm0, %ymm0
; VLX-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  %2 = add <32 x i8> %arg, %1
  ret <32 x i8> %2
}

; Unaligned non-temporal loads (not supported)
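; NOTE (illustrative; not part of the autogenerated checks): MOVNTDQA only
; accepts aligned memory operands (16-byte for xmm, 32-byte for VMOVNTDQA
; ymm), so these align-1 nontemporal loads can only be lowered to ordinary
; unaligned loads such as (v)movups, as the checks below show.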

define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
; SSE-LABEL: test_unaligned_v4f32:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f32:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v4f32:
; VLX: # BB#0:
; VLX-NEXT: vmovups (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
  ret <4 x float> %1
}

define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
; SSE-LABEL: test_unaligned_v4i32:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v4i32:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu32 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
; SSE-LABEL: test_unaligned_v2f64:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2f64:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v2f64:
; VLX: # BB#0:
; VLX-NEXT: vmovupd (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
; SSE-LABEL: test_unaligned_v2i64:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2i64:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v2i64:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
; SSE-LABEL: test_unaligned_v8i16:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v8i16:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
  ret <8 x i16> %1
}

define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
; SSE-LABEL: test_unaligned_v16i8:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v16i8:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
; VLX-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.

define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
; SSE-LABEL: test_unaligned_v8f32:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f32:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v8f32:
; VLX: # BB#0:
; VLX-NEXT: vmovups (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
; SSE-LABEL: test_unaligned_v8i32:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i32:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v8i32:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu32 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
; SSE-LABEL: test_unaligned_v4f64:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f64:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v4f64:
; VLX: # BB#0:
; VLX-NEXT: vmovupd (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
; SSE-LABEL: test_unaligned_v4i64:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i64:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v4i64:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
; SSE-LABEL: test_unaligned_v16i16:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i16:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v16i16:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1
  ret <16 x i16> %1
}

define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
; SSE-LABEL: test_unaligned_v32i8:
; SSE: # BB#0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i8:
; AVX: # BB#0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; VLX-LABEL: test_unaligned_v32i8:
; VLX: # BB#0:
; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
; VLX-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1
  ret <32 x i8> %1
}

!1 = !{i32 1}
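; NOTE (illustrative): the !nontemporal metadata node above is a single
; i32 constant 1, the form the LLVM LangRef specifies for this hint.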