196e7f3138
Replace the individual GLC, SLC, and DLC operands with a single cache_policy bitmask operand. This reduces the number of operands in MIR and, I hope, the amount of code; these operands are mostly 0 anyway. An additional advantage is that the parser will accept these flags in any order, unlike before.

Differential Revision: https://reviews.llvm.org/D96469
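
For context, a rough before/after sketch of what this means for one of the FLAT stores checked in the test below. The "before" line is reconstructed from the commit description and should be read as an assumption about the prior operand layout, not an exact quote of the old test:

    Before (separate glc, slc, and dlc immediates after the offset, assumed):
        FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    After (a single cache_policy bitmask immediate after the offset, as in the CHECK lines below):
        FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
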
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX7 %s

---
name: fminnum_ieee_f32_f64_ieee_mode_on
legalized: true
regBankSelected: true
machineFunctionInfo:
  mode:
    ieee: true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
    ; GFX7-LABEL: name: fminnum_ieee_f32_f64_ieee_mode_on
    ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
    ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
    ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
    ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
    %0:sgpr(s32) = COPY $sgpr0
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s32) = COPY $vgpr1
    %3:vgpr(p1) = COPY $vgpr3_vgpr4

    %10:sgpr(s64) = COPY $sgpr10_sgpr11
    %11:vgpr(s64) = COPY $vgpr10_vgpr11
    %12:vgpr(s64) = COPY $vgpr12_vgpr13

    ; minnum_ieee vs
    %4:vgpr(s32) = G_FMINNUM_IEEE %1, %0

    ; minnum_ieee sv
    %5:vgpr(s32) = G_FMINNUM_IEEE %0, %1

    ; minnum_ieee vv
    %6:vgpr(s32) = G_FMINNUM_IEEE %1, %2

    G_STORE %4, %3 :: (store 4, addrspace 1)
    G_STORE %5, %3 :: (store 4, addrspace 1)
    G_STORE %6, %3 :: (store 4, addrspace 1)

    ; 64-bit

    ; minnum_ieee vs
    %14:vgpr(s64) = G_FMINNUM_IEEE %10, %11

    ; minnum_ieee sv
    %15:vgpr(s64) = G_FMINNUM_IEEE %11, %10

    ; minnum_ieee vv
    %16:vgpr(s64) = G_FMINNUM_IEEE %11, %12

    S_ENDPGM 0, implicit %14, implicit %15, implicit %16
...

# FIXME: Ideally this would fail to select with ieee mode disabled

---
name: fminnum_ieee_f32_f64_ieee_mode_off
legalized: true
regBankSelected: true
machineFunctionInfo:
  mode:
    ieee: false

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4, $sgpr10_sgpr11, $vgpr10_vgpr11, $vgpr12_vgpr13
    ; GFX7-LABEL: name: fminnum_ieee_f32_f64_ieee_mode_off
    ; GFX7: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
    ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; GFX7: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
    ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
    ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
    ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64_e64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
    %0:sgpr(s32) = COPY $sgpr0
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s32) = COPY $vgpr1
    %3:vgpr(p1) = COPY $vgpr3_vgpr4

    %10:sgpr(s64) = COPY $sgpr10_sgpr11
    %11:vgpr(s64) = COPY $vgpr10_vgpr11
    %12:vgpr(s64) = COPY $vgpr12_vgpr13

    ; minnum_ieee vs
    %4:vgpr(s32) = G_FMINNUM_IEEE %1, %0

    ; minnum_ieee sv
    %5:vgpr(s32) = G_FMINNUM_IEEE %0, %1

    ; minnum_ieee vv
    %6:vgpr(s32) = G_FMINNUM_IEEE %1, %2

    G_STORE %4, %3 :: (store 4, addrspace 1)
    G_STORE %5, %3 :: (store 4, addrspace 1)
    G_STORE %6, %3 :: (store 4, addrspace 1)

    ; 64-bit

    ; minnum_ieee vs
    %14:vgpr(s64) = G_FMINNUM_IEEE %10, %11

    ; minnum_ieee sv
    %15:vgpr(s64) = G_FMINNUM_IEEE %11, %10

    ; minnum_ieee vv
    %16:vgpr(s64) = G_FMINNUM_IEEE %11, %12

    S_ENDPGM 0, implicit %14, implicit %15, implicit %16
...