; NVPTX invariant-load test. From LLVM commit e8e79f569b:
; v2f16 is a special case in NVPTX; v4f16 may be loaded as a pair of v2f16,
; which was not previously handled correctly by tryLDGLDU().
; Differential Revision: https://reviews.llvm.org/D45339 (llvm-svn: 329456)
; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s

; Check that invariant loads from the global addrspace are lowered to
; ld.global.nc.
; Scalar i32 invariant load from the global address space: must be lowered to
; the non-coherent (texture-cache) load, ld.global.nc.
; CHECK-LABEL: @ld_global
define i32 @ld_global(i32 addrspace(1)* %ptr) {
; CHECK: ld.global.nc.{{[a-z]}}32
  %a = load i32, i32 addrspace(1)* %ptr, !invariant.load !0
  ret i32 %a
}
; CHECK-LABEL: @ld_global_v2f16
define half @ld_global_v2f16(<2 x half> addrspace(1)* %ptr) {
; Load of v2f16 is weird. We consider it to be a legal type, which happens to be
; loaded/stored as a 32-bit scalar.
; CHECK: ld.global.nc.b32
  %a = load <2 x half>, <2 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <2 x half> %a, i32 0
  %v2 = extractelement <2 x half> %a, i32 1
  %sum = fadd half %v1, %v2
  ret half %sum
}
; CHECK-LABEL: @ld_global_v4f16
define half @ld_global_v4f16(<4 x half> addrspace(1)* %ptr) {
; Larger f16 vectors may be split into individual f16 elements and multiple
; loads/stores may be vectorized using f16 element type. Practically it's
; limited to v4 variant only.
; CHECK: ld.global.nc.v4.b16
  %a = load <4 x half>, <4 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <4 x half> %a, i32 0
  %v2 = extractelement <4 x half> %a, i32 1
  %v3 = extractelement <4 x half> %a, i32 2
  %v4 = extractelement <4 x half> %a, i32 3
  %sum1 = fadd half %v1, %v2
  %sum2 = fadd half %v3, %v4
  %sum = fadd half %sum1, %sum2
  ret half %sum
}
; CHECK-LABEL: @ld_global_v8f16
define half @ld_global_v8f16(<8 x half> addrspace(1)* %ptr) {
; Larger vectors are, again, loaded as v4i32. PTX has no v8 variants of loads/stores,
; so load/store vectorizer has to convert v8f16 -> v4 x v2f16.
; CHECK: ld.global.nc.v4.b32
  %a = load <8 x half>, <8 x half> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <8 x half> %a, i32 0
  %v2 = extractelement <8 x half> %a, i32 2
  %v3 = extractelement <8 x half> %a, i32 4
  %v4 = extractelement <8 x half> %a, i32 6
  %sum1 = fadd half %v1, %v2
  %sum2 = fadd half %v3, %v4
  %sum = fadd half %sum1, %sum2
  ret half %sum
}
; v2i32 invariant load should use the vectorized v2 form of ld.global.nc.
; CHECK-LABEL: @ld_global_v2i32
define i32 @ld_global_v2i32(<2 x i32> addrspace(1)* %ptr) {
; CHECK: ld.global.nc.v2.{{[a-z]}}32
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <2 x i32> %a, i32 0
  %v2 = extractelement <2 x i32> %a, i32 1
  %sum = add i32 %v1, %v2
  ret i32 %sum
}
; v4i32 invariant load should use the vectorized v4 form of ld.global.nc.
; CHECK-LABEL: @ld_global_v4i32
define i32 @ld_global_v4i32(<4 x i32> addrspace(1)* %ptr) {
; CHECK: ld.global.nc.v4.{{[a-z]}}32
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %ptr, !invariant.load !0
  %v1 = extractelement <4 x i32> %a, i32 0
  %v2 = extractelement <4 x i32> %a, i32 1
  %v3 = extractelement <4 x i32> %a, i32 2
  %v4 = extractelement <4 x i32> %a, i32 3
  %sum1 = add i32 %v1, %v2
  %sum2 = add i32 %v3, %v4
  %sum3 = add i32 %sum1, %sum2
  ret i32 %sum3
}
; Negative test: without !invariant.load metadata the load must stay a plain
; ld.global (no .nc), since the value may be written through other pointers.
; CHECK-LABEL: @ld_not_invariant
define i32 @ld_not_invariant(i32 addrspace(1)* %ptr) {
; CHECK: ld.global.{{[a-z]}}32
  %a = load i32, i32 addrspace(1)* %ptr
  ret i32 %a
}
; Negative test: a load from the generic address space (addrspace 0) must not
; become ld.global.nc; a plain ld is expected.
; CHECK-LABEL: @ld_not_global_addrspace
define i32 @ld_not_global_addrspace(i32 addrspace(0)* %ptr) {
; CHECK: ld.{{[a-z]}}32
  %a = load i32, i32 addrspace(0)* %ptr
  ret i32 %a
}
; Empty metadata node used as the !invariant.load marker on the loads above.
!0 = !{}