; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VL
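
; This file checks codegen for non-temporal vector loads (loads tagged with
; !nontemporal metadata) at each x86 SIMD feature level, from SSE2 through
; AVX-512. The CHECK lines can be regenerated by running
; utils/update_llc_test_checks.py on this file.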

define <4 x float> @test_v4f32(<4 x float>* %src) {
; SSE2-LABEL: test_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  ret <4 x float> %1
}

define <4 x i32> @test_v4i32(<4 x i32>* %src) {
; SSE2-LABEL: test_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_v2f64(<2 x double>* %src) {
; SSE2-LABEL: test_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_v2i64(<2 x i64>* %src) {
; SSE2-LABEL: test_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_v8i16(<8 x i16>* %src) {
; SSE2-LABEL: test_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  ret <8 x i16> %1
}

define <16 x i8> @test_v16i8(<16 x i8>* %src) {
; SSE2-LABEL: test_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.
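; AVX1 only has the 128-bit form of VMOVNTDQA, so 256-bit non-temporal loads
; are split into two xmm loads recombined with vinsertf128; AVX2 adds the
; ymm form.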

define <8 x float> @test_v8f32(<8 x float>* %src) {
; SSE2-LABEL: test_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_v8i32(<8 x i32>* %src) {
; SSE2-LABEL: test_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_v4f64(<4 x double>* %src) {
; SSE2-LABEL: test_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_v4i64(<4 x i64>* %src) {
; SSE2-LABEL: test_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_v16i16(<16 x i16>* %src) {
; SSE2-LABEL: test_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  ret <16 x i16> %1
}

define <32 x i8> @test_v32i8(<32 x i8>* %src) {
; SSE2-LABEL: test_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  ret <32 x i8> %1
}

; And now ZMM versions.
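; 512-bit loads follow the same pattern: only AVX-512 has a zmm VMOVNTDQA;
; AVX2 splits the load into two ymm halves, AVX1 into four xmm pieces, and
; SSE2 (which has no MOVNTDQA at all) falls back to regular movaps.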

define <16 x float> @test_v16f32(<16 x float>* %src) {
; SSE2-LABEL: test_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
  ret <16 x float> %1
}

define <16 x i32> @test_v16i32(<16 x i32>* %src) {
; SSE2-LABEL: test_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
  ret <16 x i32> %1
}

define <8 x double> @test_v8f64(<8 x double>* %src) {
; SSE2-LABEL: test_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
  ret <8 x double> %1
}

define <8 x i64> @test_v8i64(<8 x i64>* %src) {
; SSE2-LABEL: test_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
  ret <8 x i64> %1
}

define <32 x i16> @test_v32i16(<32 x i16>* %src) {
; SSE2-LABEL: test_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 64, !nontemporal !1
  ret <32 x i16> %1
}

define <64 x i8> @test_v64i8(<64 x i8>* %src) {
; SSE2-LABEL: test_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps (%rdi), %xmm0
; SSE2-NEXT: movaps 16(%rdi), %xmm1
; SSE2-NEXT: movaps 32(%rdi), %xmm2
; SSE2-NEXT: movaps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm0
; SSE41-NEXT: movntdqa 16(%rdi), %xmm1
; SSE41-NEXT: movntdqa 32(%rdi), %xmm2
; SSE41-NEXT: movntdqa 48(%rdi), %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm0
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v64i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 64, !nontemporal !1
  ret <64 x i8> %1
}

; Check cases where the load would be folded.
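; On SSE2 the plain load can be folded straight into the arithmetic
; instruction's memory operand, but once MOVNTDQA is available the
; non-temporal load must stay a separate instruction, so no folding occurs.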

define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
; SSE2-LABEL: test_arg_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
  %2 = fadd <4 x float> %arg, %1
  ret <4 x float> %2
}

define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
; SSE2-LABEL: test_arg_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
  %2 = add <4 x i32> %arg, %1
  ret <4 x i32> %2
}

define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
; SSE2-LABEL: test_arg_v2f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: addpd %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
  %2 = fadd <2 x double> %arg, %1
  ret <2 x double> %2
}

define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
; SSE2-LABEL: test_arg_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddq %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
  %2 = add <2 x i64> %arg, %1
  ret <2 x i64> %2
}

define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
; SSE2-LABEL: test_arg_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
  %2 = add <8 x i16> %arg, %1
  ret <8 x i16> %2
}

define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
; SSE2-LABEL: test_arg_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa (%rdi), %xmm1
; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_arg_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovntdqa (%rdi), %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %xmm1
; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
  %2 = add <16 x i8> %arg, %1
  ret <16 x i8> %2
}

; And now YMM versions.
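; For the 256-bit integer cases AVX1 has to extract and reinsert 128-bit
; halves around the adds, since it has no 256-bit integer arithmetic; the
; float cases can use full-width ymm adds directly.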

define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
; SSE2-LABEL: test_arg_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: addps 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addps %xmm3, %xmm0
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
  %2 = fadd <8 x float> %arg, %1
  ret <8 x float> %2
}

define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
; SSE2-LABEL: test_arg_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddd %xmm3, %xmm0
; SSE41-NEXT: paddd %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
  %2 = add <8 x i32> %arg, %1
  ret <8 x i32> %2
}

define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
; SSE2-LABEL: test_arg_v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: addpd %xmm3, %xmm0
; SSE41-NEXT: addpd %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa (%rdi), %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
  %2 = fadd <4 x double> %arg, %1
  ret <4 x double> %2
}

define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
; SSE2-LABEL: test_arg_v4i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v4i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddq %xmm3, %xmm0
; SSE41-NEXT: paddq %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
  %2 = add <4 x i64> %arg, %1
  ret <4 x i64> %2
}

define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
; SSE2-LABEL: test_arg_v16i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddw %xmm3, %xmm0
; SSE41-NEXT: paddw %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
  %2 = add <16 x i16> %arg, %1
  ret <16 x i16> %2
}

define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
; SSE2-LABEL: test_arg_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 16(%rdi), %xmm2
; SSE41-NEXT: movntdqa (%rdi), %xmm3
; SSE41-NEXT: paddb %xmm3, %xmm0
; SSE41-NEXT: paddb %xmm2, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm2
; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa (%rdi), %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %ymm1
; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
  %2 = add <32 x i8> %arg, %1
  ret <32 x i8> %2
}

; And now ZMM versions.
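; Note the v32i16/v64i8 cases below: vpaddw/vpaddb on zmm registers require
; AVX512BW, so plain AVX512F (and AVX512VL without BW) splits the add into
; ymm halves with vextracti64x4/vinserti64x4.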

define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
; SSE2-LABEL: test_arg_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps (%rdi), %xmm0
; SSE2-NEXT: addps 16(%rdi), %xmm1
; SSE2-NEXT: addps 32(%rdi), %xmm2
; SSE2-NEXT: addps 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: addps %xmm7, %xmm0
; SSE41-NEXT: addps %xmm6, %xmm1
; SSE41-NEXT: addps %xmm5, %xmm2
; SSE41-NEXT: addps %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vaddps %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x float>, <16 x float>* %src, align 64, !nontemporal !1
  %2 = fadd <16 x float> %arg, %1
  ret <16 x float> %2
}

define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
; SSE2-LABEL: test_arg_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: paddd (%rdi), %xmm0
; SSE2-NEXT: paddd 16(%rdi), %xmm1
; SSE2-NEXT: paddd 32(%rdi), %xmm2
; SSE2-NEXT: paddd 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddd %xmm7, %xmm0
; SSE41-NEXT: paddd %xmm6, %xmm1
; SSE41-NEXT: paddd %xmm5, %xmm2
; SSE41-NEXT: paddd %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 64, !nontemporal !1
  %2 = add <16 x i32> %arg, %1
  ret <16 x i32> %2
}

define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
; SSE2-LABEL: test_arg_v8f64:
; SSE2: # %bb.0:
; SSE2-NEXT: addpd (%rdi), %xmm0
; SSE2-NEXT: addpd 16(%rdi), %xmm1
; SSE2-NEXT: addpd 32(%rdi), %xmm2
; SSE2-NEXT: addpd 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8f64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: addpd %xmm7, %xmm0
; SSE41-NEXT: addpd %xmm6, %xmm1
; SSE41-NEXT: addpd %xmm5, %xmm2
; SSE41-NEXT: addpd %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vaddpd %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 64, !nontemporal !1
  %2 = fadd <8 x double> %arg, %1
  ret <8 x double> %2
}

define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
; SSE2-LABEL: test_arg_v8i64:
; SSE2: # %bb.0:
; SSE2-NEXT: paddq (%rdi), %xmm0
; SSE2-NEXT: paddq 16(%rdi), %xmm1
; SSE2-NEXT: paddq 32(%rdi), %xmm2
; SSE2-NEXT: paddq 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v8i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddq %xmm7, %xmm0
; SSE41-NEXT: paddq %xmm6, %xmm1
; SSE41-NEXT: paddq %xmm5, %xmm2
; SSE41-NEXT: paddq %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v8i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v8i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_arg_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 64, !nontemporal !1
  %2 = add <8 x i64> %arg, %1
  ret <8 x i64> %2
}

define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
; SSE2-LABEL: test_arg_v32i16:
; SSE2: # %bb.0:
; SSE2-NEXT: paddw (%rdi), %xmm0
; SSE2-NEXT: paddw 16(%rdi), %xmm1
; SSE2-NEXT: paddw 32(%rdi), %xmm2
; SSE2-NEXT: paddw 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v32i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddw %xmm7, %xmm0
; SSE41-NEXT: paddw %xmm6, %xmm1
; SSE41-NEXT: paddw %xmm5, %xmm2
; SSE41-NEXT: paddw %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v32i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v32i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm2
; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v32i16:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm2
; AVX512VL-NEXT: vpaddw %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 64, !nontemporal !1
  %2 = add <32 x i16> %arg, %1
  ret <32 x i16> %2
}

define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
; SSE2-LABEL: test_arg_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: paddb (%rdi), %xmm0
; SSE2-NEXT: paddb 16(%rdi), %xmm1
; SSE2-NEXT: paddb 32(%rdi), %xmm2
; SSE2-NEXT: paddb 48(%rdi), %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_arg_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm5
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm7
; SSE41-NEXT: paddb %xmm7, %xmm0
; SSE41-NEXT: paddb %xmm6, %xmm1
; SSE41-NEXT: paddb %xmm5, %xmm2
; SSE41-NEXT: paddb %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_arg_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm3
; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_arg_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm2
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: test_arg_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: vmovntdqa (%rdi), %ymm2
; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_arg_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_arg_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovntdqa 32(%rdi), %ymm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: vmovntdqa (%rdi), %ymm2
; AVX512VL-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 64, !nontemporal !1
  %2 = add <64 x i8> %arg, %1
  ret <64 x i8> %2
}

; Unaligned non-temporal loads (not supported)
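; MOVNTDQA requires a 16-byte-aligned memory operand, so under-aligned
; non-temporal loads are lowered to ordinary unaligned loads
; (movups/vmovups) and the hint is simply dropped.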

define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
; SSE-LABEL: test_unaligned_v4f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
  ret <4 x float> %1
}

define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
; SSE-LABEL: test_unaligned_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
  ret <4 x i32> %1
}

define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
; SSE-LABEL: test_unaligned_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
  ret <2 x double> %1
}

define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
; SSE-LABEL: test_unaligned_v2i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
  ret <2 x i64> %1
}

define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
; SSE-LABEL: test_unaligned_v8i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
  ret <8 x i16> %1
}

define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
; SSE-LABEL: test_unaligned_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1
  ret <16 x i8> %1
}

; And now YMM versions.

define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
; SSE-LABEL: test_unaligned_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1
  ret <8 x float> %1
}

define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
; SSE-LABEL: test_unaligned_v8i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1
  ret <8 x i32> %1
}

define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
; SSE-LABEL: test_unaligned_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1
  ret <4 x double> %1
}

define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
; SSE-LABEL: test_unaligned_v4i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v4i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1
  ret <4 x i64> %1
}

define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
; SSE-LABEL: test_unaligned_v16i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1
  ret <16 x i16> %1
}

define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
; SSE-LABEL: test_unaligned_v32i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %ymm0
; AVX512-NEXT: retq
  %1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1
  ret <32 x i8> %1
}

; And now ZMM versions.

define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
; SSE-LABEL: test_unaligned_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16f32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x float>, <16 x float>* %src, align 1, !nontemporal !1
  ret <16 x float> %1
}

define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
; SSE-LABEL: test_unaligned_v16i32:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v16i32:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i32>, <16 x i32>* %src, align 1, !nontemporal !1
  ret <16 x i32> %1
}

define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
; SSE-LABEL: test_unaligned_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8f64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x double>, <8 x double>* %src, align 1, !nontemporal !1
  ret <8 x double> %1
}

define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
; SSE-LABEL: test_unaligned_v8i64:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v8i64:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i64>, <8 x i64>* %src, align 1, !nontemporal !1
  ret <8 x i64> %1
}

define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
; SSE-LABEL: test_unaligned_v32i16:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v32i16:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <32 x i16>, <32 x i16>* %src, align 1, !nontemporal !1
  ret <32 x i16> %1
}

define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
; SSE-LABEL: test_unaligned_v64i8:
; SSE: # %bb.0:
; SSE-NEXT: movups (%rdi), %xmm0
; SSE-NEXT: movups 16(%rdi), %xmm1
; SSE-NEXT: movups 32(%rdi), %xmm2
; SSE-NEXT: movups 48(%rdi), %xmm3
; SSE-NEXT: retq
;
; AVX-LABEL: test_unaligned_v64i8:
; AVX: # %bb.0:
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: vmovups 32(%rdi), %ymm1
; AVX-NEXT: retq
;
; AVX512-LABEL: test_unaligned_v64i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovups (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <64 x i8>, <64 x i8>* %src, align 1, !nontemporal !1
  ret <64 x i8> %1
}
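
; With AVX-512 a non-temporal load can be combined with predication:
; vptestmd materializes the mask and a masked vmovdqa32 performs the select,
; whereas the older ISAs need explicit compares and blends.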

define <16 x i32> @test_masked_v16i32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; SSE2-LABEL: test_masked_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: pcmpeqd %xmm8, %xmm7
; SSE2-NEXT: pcmpeqd %xmm8, %xmm6
; SSE2-NEXT: pcmpeqd %xmm8, %xmm5
; SSE2-NEXT: pcmpeqd %xmm8, %xmm4
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn (%rdi), %xmm4
; SSE2-NEXT: por %xmm4, %xmm0
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pandn 16(%rdi), %xmm5
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pandn 32(%rdi), %xmm6
; SSE2-NEXT: por %xmm6, %xmm2
; SSE2-NEXT: pand %xmm7, %xmm3
; SSE2-NEXT: pandn 48(%rdi), %xmm7
; SSE2-NEXT: por %xmm7, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_masked_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm7, %xmm9
; SSE41-NEXT: movdqa %xmm6, %xmm10
; SSE41-NEXT: movdqa %xmm5, %xmm11
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm9
; SSE41-NEXT: pcmpeqd %xmm0, %xmm10
; SSE41-NEXT: pcmpeqd %xmm0, %xmm11
; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
; SSE41-NEXT: movntdqa 48(%rdi), %xmm4
; SSE41-NEXT: movntdqa 32(%rdi), %xmm7
; SSE41-NEXT: movntdqa 16(%rdi), %xmm6
; SSE41-NEXT: movntdqa (%rdi), %xmm5
; SSE41-NEXT: blendvps %xmm0, %xmm8, %xmm5
; SSE41-NEXT: movdqa %xmm11, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm1, %xmm6
; SSE41-NEXT: movdqa %xmm10, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm7
; SSE41-NEXT: movdqa %xmm9, %xmm0
; SSE41-NEXT: blendvps %xmm0, %xmm3, %xmm4
; SSE41-NEXT: movaps %xmm5, %xmm0
; SSE41-NEXT: movaps %xmm6, %xmm1
; SSE41-NEXT: movaps %xmm7, %xmm2
; SSE41-NEXT: movaps %xmm4, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_masked_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm4
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
; AVX1-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1
; AVX1-NEXT: vmovntdqa (%rdi), %xmm3
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vblendvps %ymm2, %ymm0, %ymm3, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_masked_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpcmpeqd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vmovntdqa 32(%rdi), %ymm4
; AVX2-NEXT: vblendvps %ymm3, %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vmovntdqa (%rdi), %ymm3
; AVX2-NEXT: vblendvps %ymm2, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_masked_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k1
; AVX512-NEXT: vmovntdqa (%rdi), %zmm1
; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: retq
  %mask = icmp ne <16 x i32> %mask1, zeroinitializer
  %vaddr = bitcast i8* %addr to <16 x i32>*
  %r = load <16 x i32>, <16 x i32>* %vaddr, align 64, !nontemporal !1
  %res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
  ret <16 x i32>%res
}

; Reduced from https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=10895
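; None of these subtargets has a scalar non-temporal load, so the
; !nontemporal hint on a scalar float load is ignored and a plain movss is
; emitted.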
define i32 @PR39256(float* %ptr) {
; SSE-LABEL: PR39256:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: ucomiss {{.*}}(%rip), %xmm0
; SSE-NEXT: setb (%rax)
; SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; SSE-NEXT: retq
;
; AVX-LABEL: PR39256:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vucomiss {{.*}}(%rip), %xmm0
; AVX-NEXT: setb (%rax)
; AVX-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; AVX-NEXT: retq
;
; AVX512-LABEL: PR39256:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vucomiss {{.*}}(%rip), %xmm0
; AVX512-NEXT: setb (%rax)
; AVX512-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; AVX512-NEXT: retq
entry:
  %l = load float, float* %ptr, !nontemporal !1
  %C = fcmp ult float %l, 0x36A0000000000000
  store i1 %C, i1* undef
  ret i32 -2147483648
}

!1 = !{i32 1}