llvm-mirror/test/CodeGen/AMDGPU/GlobalISel/legalize-zextload-flat.mir
Matt Arsenault cc12b285b6 CodeGen: Print/parse LLTs in MachineMemOperands
This will currently accept the old number-of-bytes syntax and convert
it to a scalar. That fallback should be removed in the near future (I
think I converted all of the tests already, but likely missed a few).

I'm not sure what the exact syntax and policy should be. We can continue
printing the number of bytes for non-generic instructions to avoid
test churn and only allow non-scalar types for generic instructions.

This will currently print the LLT in parentheses, but it still accepts
the existing integers when parsing and implicitly converts them to
scalars. The parentheses are a bit ugly, but the parser does not seem
able to cope without either parentheses or some keyword to indicate the
start of a type.
2021-06-30 16:54:13 -04:00
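
For illustration of the syntax change (the operand below is representative, not copied from a particular test): a memory operand that was previously written with a raw byte size, e.g.

    %1:_(s32) = G_ZEXTLOAD %0 :: (load 1, addrspace 0)

is now printed with an LLT in parentheses, as in the test below:

    %1:_(s32) = G_ZEXTLOAD %0 :: (load (s8), addrspace 0)

The old integer form is still accepted by the parser and implicitly converted to the corresponding scalar type.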


# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer -o - %s | FileCheck %s --check-prefix=SI
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -o - %s | FileCheck %s --check-prefix=VI
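# NOTE: If the legalizer output changes, the check lines below can be regenerated by rerunning utils/update_mir_test_checks.py on this file.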
---
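# Zero-extending an 8-bit flat load into an s32 result is already legal: the checks expect a single G_ZEXTLOAD with an (s8) memory type on both subtargets.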
name: test_zextload_flat_i32_i8
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i32_i8
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; SI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
    ; VI-LABEL: name: test_zextload_flat_i32_i8
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; VI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_ZEXTLOAD %0 :: (load (s8), addrspace 0)
    $vgpr0 = COPY %1
...
---
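# Same pattern with a 16-bit source: a single G_ZEXTLOAD with an (s16) memory type should survive legalization unchanged.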
name: test_zextload_flat_i32_i16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i32_i16
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
    ; SI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
    ; VI-LABEL: name: test_zextload_flat_i32_i16
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
    ; VI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_ZEXTLOAD %0 :: (load (s16), addrspace 0)
    $vgpr0 = COPY %1
...
---
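# The odd-sized s31 result is widened to s32, so the (s8) G_ZEXTLOAD is kept and the trailing G_ANYEXT collapses into a COPY of the already-widened value.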
name: test_zextload_flat_i31_i8
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i31_i8
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ZEXTLOAD]](s32)
    ; SI: $vgpr0 = COPY [[COPY1]](s32)
    ; VI-LABEL: name: test_zextload_flat_i31_i8
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[ZEXTLOAD]](s32)
    ; VI: $vgpr0 = COPY [[COPY1]](s32)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s31) = G_ZEXTLOAD %0 :: (load (s8), addrspace 0)
    %2:_(s32) = G_ANYEXT %1
    $vgpr0 = COPY %2
...
---
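# A zero-extending 8-bit load into s64 is split: an s32 G_ZEXTLOAD of (s8) followed by a G_ZEXT of the result to s64.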
name: test_zextload_flat_i64_i8
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i64_i8
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
    ; SI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    ; VI-LABEL: name: test_zextload_flat_i64_i8
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
    ; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
    ; VI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s64) = G_ZEXTLOAD %0 :: (load (s8), addrspace 0)
    $vgpr0_vgpr1 = COPY %1
...
---
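# Same split for a 16-bit source: an s32 G_ZEXTLOAD of (s16), then a G_ZEXT to s64.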
name: test_zextload_flat_i64_i16
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i64_i16
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
    ; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
    ; SI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    ; VI-LABEL: name: test_zextload_flat_i64_i16
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
    ; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ZEXTLOAD]](s32)
    ; VI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s64) = G_ZEXTLOAD %0 :: (load (s16), addrspace 0)
    $vgpr0_vgpr1 = COPY %1
...
---
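# When the loaded width already matches 32 bits, the zextload is lowered to a plain G_LOAD of (s32) followed by a G_ZEXT to s64.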
name: test_zextload_flat_i64_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; SI-LABEL: name: test_zextload_flat_i64_i32
    ; SI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; SI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
    ; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
    ; SI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    ; VI-LABEL: name: test_zextload_flat_i64_i32
    ; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
    ; VI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
    ; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s32)
    ; VI: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
    %0:_(p0) = COPY $vgpr0_vgpr1
    %1:_(s64) = G_ZEXTLOAD %0 :: (load (s32), addrspace 0)
    $vgpr0_vgpr1 = COPY %1
...