llvm-mirror/test/CodeGen/NVPTX/ld-addrspace.ll
Commit ab043ff680 by David Blaikie: [opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test-case improvements/changes were required this time around (r229269-r229278):

import fileinput
import sys
import re

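# Match "load [atomic] [volatile] <ty>[ addrspace(N)]*" on a test line, capturing the
# prefix through the pointee type (group 1), the type alone (group 2), and the optional
# addrspace suffix (group 3), so the type can be repeated as the explicit result type.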
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
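
For instance, applied to the first load in this test, the substitution turns the
old one-type syntax into the new two-type form (the pre-change line is
reconstructed here purely for illustration):

  %a = load i8 addrspace(1)* %ptr       ; before: type appears only on the pointer
  %a = load i8, i8 addrspace(1)* %ptr   ; after: explicit result type, then the pointer operand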

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
2015-02-27 21:17:42 +00:00


; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX32
; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX64
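;; Loads from the global (addrspace 1), shared (addrspace 3), and local
;; (addrspace 5) address spaces should lower to ld.global.*, ld.shared.*, and
;; ld.local.* respectively, with 32-bit pointer operands (%r) for nvptx and
;; 64-bit pointer operands (%rd) for nvptx64.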
;; i8
define i8 @ld_global_i8(i8 addrspace(1)* %ptr) {
; PTX32: ld.global.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8, i8 addrspace(1)* %ptr
ret i8 %a
}
define i8 @ld_shared_i8(i8 addrspace(3)* %ptr) {
; PTX32: ld.shared.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8, i8 addrspace(3)* %ptr
ret i8 %a
}
define i8 @ld_local_i8(i8 addrspace(5)* %ptr) {
; PTX32: ld.local.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i8, i8 addrspace(5)* %ptr
ret i8 %a
}
;; i16
define i16 @ld_global_i16(i16 addrspace(1)* %ptr) {
; PTX32: ld.global.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16, i16 addrspace(1)* %ptr
ret i16 %a
}
define i16 @ld_shared_i16(i16 addrspace(3)* %ptr) {
; PTX32: ld.shared.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16, i16 addrspace(3)* %ptr
ret i16 %a
}
define i16 @ld_local_i16(i16 addrspace(5)* %ptr) {
; PTX32: ld.local.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i16, i16 addrspace(5)* %ptr
ret i16 %a
}
;; i32
define i32 @ld_global_i32(i32 addrspace(1)* %ptr) {
; PTX32: ld.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32, i32 addrspace(1)* %ptr
ret i32 %a
}
define i32 @ld_shared_i32(i32 addrspace(3)* %ptr) {
; PTX32: ld.shared.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32, i32 addrspace(3)* %ptr
ret i32 %a
}
define i32 @ld_local_i32(i32 addrspace(5)* %ptr) {
; PTX32: ld.local.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i32, i32 addrspace(5)* %ptr
ret i32 %a
}
;; i64
define i64 @ld_global_i64(i64 addrspace(1)* %ptr) {
; PTX32: ld.global.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64, i64 addrspace(1)* %ptr
ret i64 %a
}
define i64 @ld_shared_i64(i64 addrspace(3)* %ptr) {
; PTX32: ld.shared.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64, i64 addrspace(3)* %ptr
ret i64 %a
}
define i64 @ld_local_i64(i64 addrspace(5)* %ptr) {
; PTX32: ld.local.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load i64, i64 addrspace(5)* %ptr
ret i64 %a
}
;; f32
define float @ld_global_f32(float addrspace(1)* %ptr) {
; PTX32: ld.global.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float, float addrspace(1)* %ptr
ret float %a
}
define float @ld_shared_f32(float addrspace(3)* %ptr) {
; PTX32: ld.shared.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float, float addrspace(3)* %ptr
ret float %a
}
define float @ld_local_f32(float addrspace(5)* %ptr) {
; PTX32: ld.local.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load float, float addrspace(5)* %ptr
ret float %a
}
;; f64
define double @ld_global_f64(double addrspace(1)* %ptr) {
; PTX32: ld.global.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double, double addrspace(1)* %ptr
ret double %a
}
define double @ld_shared_f64(double addrspace(3)* %ptr) {
; PTX32: ld.shared.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double, double addrspace(3)* %ptr
ret double %a
}
define double @ld_local_f64(double addrspace(5)* %ptr) {
; PTX32: ld.local.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
; PTX32: ret
; PTX64: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
; PTX64: ret
%a = load double, double addrspace(5)* %ptr
ret double %a
}