; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
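; Checks frem lowering for f16, f32, and f64, scalar and vector, on SI, CI,
; and VI. The plain, 'fast', and 'afn' variants of each scalar type show how
; fast-math flags change the expansion.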
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mattr=+mad-mac-f32-insts -verify-machineinstrs < %s | FileCheck --check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefix=CI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s

define amdgpu_kernel void @frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; SI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; SI-NEXT: v_rcp_f32_e32 v4, v3
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; SI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; SI-NEXT: v_fma_f32 v4, v5, v4, v4
; SI-NEXT: v_mul_f32_e32 v5, v2, v4
; SI-NEXT: v_fma_f32 v6, -v3, v5, v2
; SI-NEXT: v_fma_f32 v5, v6, v4, v5
; SI-NEXT: v_fma_f32 v2, -v3, v5, v2
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; SI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; SI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; CI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; CI-NEXT: v_rcp_f32_e32 v4, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v2, v4
; CI-NEXT: v_fma_f32 v6, -v3, v5, v2
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v3, v5, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f32_f16_e32 v3, v4
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v5, v2
; VI-NEXT: v_rcp_f32_e32 v5, v5
; VI-NEXT: v_mul_f32_e32 v3, v3, v5
; VI-NEXT: v_cvt_f16_f32_e32 v3, v3
; VI-NEXT: v_div_fixup_f16 v3, v3, v2, v4
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
half addrspace(1)* %in2) #0 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}
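
; With 'fast' math flags the f16 expansion skips the IEEE-accurate
; div_scale/div_fmas/div_fixup sequence: SI/CI use a plain f32 rcp, and VI
; uses v_rcp_f16 directly.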
define amdgpu_kernel void @fast_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: fast_frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f16_e32 v3, v2
; VI-NEXT: v_mul_f16_e32 v3, v4, v3
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
half addrspace(1)* %in2) #0 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem fast half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}
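
; 'afn' on the frem gives the same approximate expansion as 'fast' above.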
define amdgpu_kernel void @unsafe_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: buffer_load_ushort v1, off, s[0:3], 0 offset:8
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: buffer_store_short v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 8
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_ushort v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_ushort v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f16_e32 v3, v2
; VI-NEXT: v_mul_f16_e32 v3, v4, v3
; VI-NEXT: v_trunc_f16_e32 v3, v3
; VI-NEXT: v_fma_f16 v2, -v3, v2, v4
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
half addrspace(1)* %in2) #1 {
  %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
  %r0 = load half, half addrspace(1)* %in1, align 4
  %r1 = load half, half addrspace(1)* %gep2, align 4
  %r2 = frem afn half %r0, %r1
  store half %r2, half addrspace(1)* %out, align 4
  ret void
}
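
; Plain f32 frem uses the full division expansion: div_scale and rcp with
; fma refinement, div_fmas/div_fixup, then trunc and a final fma to form the
; remainder. The s_setreg writes to HW_REG_MODE toggle FP32 denormal
; handling around the refinement.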
define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; SI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; SI-NEXT: v_rcp_f32_e32 v4, v3
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; SI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; SI-NEXT: v_fma_f32 v4, v5, v4, v4
; SI-NEXT: v_mul_f32_e32 v5, v2, v4
; SI-NEXT: v_fma_f32 v6, -v3, v5, v2
; SI-NEXT: v_fma_f32 v5, v6, v4, v5
; SI-NEXT: v_fma_f32 v2, -v3, v5, v2
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; SI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; SI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f32 v3, s[0:1], v1, v1, v0
; CI-NEXT: v_div_scale_f32 v2, vcc, v0, v1, v0
; CI-NEXT: v_rcp_f32_e32 v4, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v3, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v2, v4
; CI-NEXT: v_fma_f32 v6, -v3, v5, v2
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v3, v5, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, v0
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f32 v5, s[0:1], v2, v2, v4
; VI-NEXT: v_div_scale_f32 v3, vcc, v4, v2, v4
; VI-NEXT: v_rcp_f32_e32 v6, v5
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; VI-NEXT: v_fma_f32 v6, v7, v6, v6
; VI-NEXT: v_mul_f32_e32 v7, v3, v6
; VI-NEXT: v_fma_f32 v8, -v5, v7, v3
; VI-NEXT: v_fma_f32 v7, v8, v6, v7
; VI-NEXT: v_fma_f32 v3, -v5, v7, v3
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v3, v3, v6, v7
; VI-NEXT: v_div_fixup_f32 v3, v3, v2, v4
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}
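
; 'fast' f32 frem reduces to rcp, mul, trunc, and one fma; no division
; scaling or fixup is emitted.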
define amdgpu_kernel void @fast_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: fast_frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f32_e32 v3, v2
; VI-NEXT: v_mul_f32_e32 v3, v4, v3
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem fast float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}
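
; 'afn' matches the 'fast' f32 expansion above.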
define amdgpu_kernel void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f32_e32 v2, v1
; SI-NEXT: v_mul_f32_e32 v2, v0, v2
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v1, off, s[0:3], 0 offset:16
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f32_e32 v2, v1
; CI-NEXT: v_mul_f32_e32 v2, v0, v2
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v0, -v2, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f32_e32 v3, v2
; VI-NEXT: v_mul_f32_e32 v3, v4, v3
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, v4
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
float addrspace(1)* %in2) #1 {
  %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
  %r0 = load float, float addrspace(1)* %in1, align 4
  %r1 = load float, float addrspace(1)* %gep2, align 4
  %r2 = frem afn float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}
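
; f64 frem. SI has no v_trunc_f64, so on SI the truncation of the quotient
; is expanded with v_bfe_u32 and mask/select operations on the exponent;
; CI and VI use v_trunc_f64_e32.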
define amdgpu_kernel void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s8
; SI-NEXT: s_mov_b32 s5, s9
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s11
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
; SI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; SI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; SI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; SI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; SI-NEXT: v_div_scale_f64 v[8:9], s[0:1], v[0:1], v[2:3], v[0:1]
; SI-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; SI-NEXT: v_fma_f64 v[12:13], -v[4:5], v[10:11], v[8:9]
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v1, v9
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
; SI-NEXT: s_nop 1
; SI-NEXT: v_div_fmas_f64 v[4:5], v[12:13], v[6:7], v[10:11]
; SI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
; CI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_div_scale_f64 v[8:9], vcc, v[0:1], v[2:3], v[0:1]
; CI-NEXT: v_mul_f64 v[10:11], v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
; CI-NEXT: s_nop 1
; CI-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
; CI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[2:3]
; VI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; VI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; VI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; VI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; VI-NEXT: v_div_scale_f64 v[10:11], vcc, v[2:3], v[4:5], v[2:3]
; VI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
; VI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
; VI-NEXT: s_nop 1
; VI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
; VI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[2:3]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
double addrspace(1)* %in2) #0 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}
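
; Even with 'fast' flags the f64 path keeps the fma-based refinement of
; v_rcp_f64, but drops div_scale/div_fmas/div_fixup.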
define amdgpu_kernel void @fast_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: fast_frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; SI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s10
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: fast_frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; CI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: fast_frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_mul_f64 v[8:9], v[2:3], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[4:5], v[8:9], v[2:3]
; VI-NEXT: v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
double addrspace(1)* %in2) #0 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem fast double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}
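
; 'afn' matches the 'fast' f64 expansion above.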
define amdgpu_kernel void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
; SI-LABEL: unsafe_frem_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; SI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; SI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; SI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; SI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; SI-NEXT: v_bfe_u32 v6, v5, 20, 11
; SI-NEXT: v_add_i32_e32 v8, vcc, 0xfffffc01, v6
; SI-NEXT: s_mov_b32 s1, 0xfffff
; SI-NEXT: s_mov_b32 s0, s10
; SI-NEXT: v_lshr_b64 v[6:7], s[0:1], v8
; SI-NEXT: v_not_b32_e32 v6, v6
; SI-NEXT: v_and_b32_e32 v6, v4, v6
; SI-NEXT: v_not_b32_e32 v7, v7
; SI-NEXT: v_and_b32_e32 v7, v5, v7
; SI-NEXT: v_and_b32_e32 v9, 0x80000000, v5
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v9, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v8
; SI-NEXT: v_cndmask_b32_e64 v5, v7, v5, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v4, v6, v4, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: unsafe_frem_f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s11, 0xf000
; CI-NEXT: s_mov_b32 s10, -1
; CI-NEXT: s_mov_b32 s2, s10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s8, s4
; CI-NEXT: s_mov_b32 s9, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s10
; CI-NEXT: s_mov_b32 s7, s11
; CI-NEXT: s_mov_b32 s3, s11
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[0:3], 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[6:7], v[4:5], v[4:5]
; CI-NEXT: v_mul_f64 v[6:7], v[0:1], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[2:3], v[6:7], v[0:1]
; CI-NEXT: v_fma_f64 v[4:5], v[8:9], v[4:5], v[6:7]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: unsafe_frem_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[8:9], v[6:7], v[6:7]
; VI-NEXT: v_mul_f64 v[8:9], v[2:3], v[6:7]
; VI-NEXT: v_fma_f64 v[10:11], -v[4:5], v[8:9], v[2:3]
; VI-NEXT: v_fma_f64 v[6:7], v[10:11], v[6:7], v[8:9]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
double addrspace(1)* %in2) #1 {
  %r0 = load double, double addrspace(1)* %in1, align 8
  %r1 = load double, double addrspace(1)* %in2, align 8
  %r2 = frem afn double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}
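
; Vector frem is scalarized: each f16 element repeats the scalar expansion
; and the halves are repacked with v_lshlrev/v_or.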
define amdgpu_kernel void @frem_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in1,
; SI-LABEL: frem_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_div_scale_f32 v4, vcc, v0, v2, v0
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v2, v2, v0
; SI-NEXT: v_rcp_f32_e32 v6, v5
; SI-NEXT: s_mov_b32 s6, 3
; SI-NEXT: s_mov_b32 s7, 0
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; SI-NEXT: v_fma_f32 v6, v7, v6, v6
; SI-NEXT: v_mul_f32_e32 v7, v4, v6
; SI-NEXT: v_fma_f32 v8, -v5, v7, v4
; SI-NEXT: v_fma_f32 v7, v8, v6, v7
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; SI-NEXT: v_div_fixup_f32 v4, v4, v2, v0
; SI-NEXT: v_trunc_f32_e32 v4, v4
; SI-NEXT: v_fma_f32 v0, -v4, v2, v0
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_div_scale_f32 v2, vcc, v1, v3, v1
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v3, v3, v1
; SI-NEXT: v_rcp_f32_e32 v5, v4
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; SI-NEXT: v_fma_f32 v5, v6, v5, v5
; SI-NEXT: v_mul_f32_e32 v6, v2, v5
; SI-NEXT: v_fma_f32 v7, -v4, v6, v2
; SI-NEXT: v_fma_f32 v6, v7, v5, v6
; SI-NEXT: v_fma_f32 v2, -v4, v6, v2
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v2, v2, v5, v6
; SI-NEXT: v_div_fixup_f32 v2, v2, v3, v1
; SI-NEXT: v_trunc_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v1, -v2, v3, v1
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_v2f16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
; CI-NEXT: s_mov_b32 s10, s2
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s0, s4
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s11, s3
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
; CI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; CI-NEXT: buffer_load_dword v2, off, s[8:11], 0 offset:16
; CI-NEXT: s_mov_b32 s6, 3
; CI-NEXT: s_mov_b32 s7, 0
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_cvt_f32_f16_e32 v1, v0
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v2, v2, v0
; CI-NEXT: v_div_scale_f32 v4, vcc, v0, v2, v0
; CI-NEXT: v_rcp_f32_e32 v6, v5
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; CI-NEXT: v_fma_f32 v6, v7, v6, v6
; CI-NEXT: v_mul_f32_e32 v7, v4, v6
; CI-NEXT: v_fma_f32 v8, -v5, v7, v4
; CI-NEXT: v_fma_f32 v7, v8, v6, v7
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; CI-NEXT: v_div_fixup_f32 v4, v4, v2, v0
; CI-NEXT: v_trunc_f32_e32 v4, v4
; CI-NEXT: v_fma_f32 v0, -v4, v2, v0
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v3, v3, v1
; CI-NEXT: v_div_scale_f32 v2, vcc, v1, v3, v1
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; CI-NEXT: v_rcp_f32_e32 v5, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; CI-NEXT: v_fma_f32 v5, v6, v5, v5
; CI-NEXT: v_mul_f32_e32 v6, v2, v5
; CI-NEXT: v_fma_f32 v7, -v4, v6, v2
; CI-NEXT: v_fma_f32 v6, v7, v5, v6
; CI-NEXT: v_fma_f32 v2, -v4, v6, v2
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v2, v2, v5, v6
; CI-NEXT: v_div_fixup_f32 v2, v2, v3, v1
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v1, -v2, v3, v1
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
; CI-NEXT: v_or_b32_e32 v0, v1, v0
; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: flat_load_dword v4, v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_load_dword v2, v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
; VI-NEXT: v_cvt_f32_f16_e32 v5, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; VI-NEXT: v_cvt_f32_f16_e32 v7, v6
; VI-NEXT: v_rcp_f32_e32 v7, v7
; VI-NEXT: v_mul_f32_e32 v5, v5, v7
; VI-NEXT: v_cvt_f16_f32_e32 v5, v5
; VI-NEXT: v_div_fixup_f16 v5, v5, v6, v3
; VI-NEXT: v_trunc_f16_e32 v5, v5
; VI-NEXT: v_fma_f16 v3, -v5, v6, v3
; VI-NEXT: v_cvt_f32_f16_e32 v6, v2
; VI-NEXT: v_cvt_f32_f16_e32 v5, v4
; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; VI-NEXT: v_rcp_f32_e32 v6, v6
; VI-NEXT: v_mul_f32_e32 v5, v5, v6
; VI-NEXT: v_cvt_f16_f32_e32 v5, v5
; VI-NEXT: v_div_fixup_f16 v5, v5, v2, v4
; VI-NEXT: v_trunc_f16_e32 v5, v5
; VI-NEXT: v_fma_f16 v2, -v5, v2, v4
; VI-NEXT: v_or_b32_e32 v2, v2, v3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
<2 x half> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x half>, <2 x half> addrspace(1)* %in2, i32 4
  %r0 = load <2 x half>, <2 x half> addrspace(1)* %in1, align 8
  %r1 = load <2 x half>, <2 x half> addrspace(1)* %gep2, align 8
  %r2 = frem <2 x half> %r0, %r1
  store <2 x half> %r2, <2 x half> addrspace(1)* %out, align 8
  ret void
}
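
; Same scalarization over four f16 elements.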
define amdgpu_kernel void @frem_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in1,
|
|
; SI-LABEL: frem_v4f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
; SI-NEXT: s_mov_b32 s6, s2
|
|
; SI-NEXT: s_mov_b32 s7, s3
|
|
; SI-NEXT: s_mov_b32 s10, s2
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v2, v0
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v3, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v4, v1
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v6, v0
|
|
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
|
|
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
; SI-NEXT: v_div_scale_f32 v8, vcc, v5, v1, v5
|
|
; SI-NEXT: v_div_scale_f32 v9, s[4:5], v1, v1, v5
|
|
; SI-NEXT: v_rcp_f32_e32 v10, v9
|
|
; SI-NEXT: s_mov_b32 s6, 3
|
|
; SI-NEXT: s_mov_b32 s7, 0
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; SI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
; SI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
; SI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
; SI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
; SI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
; SI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; SI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
; SI-NEXT: v_div_fixup_f32 v8, v8, v1, v5
|
|
; SI-NEXT: v_trunc_f32_e32 v8, v8
|
|
; SI-NEXT: v_fma_f32 v1, -v8, v1, v5
|
|
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; SI-NEXT: v_div_scale_f32 v5, vcc, v4, v7, v4
|
|
; SI-NEXT: v_div_scale_f32 v8, s[4:5], v7, v7, v4
|
|
; SI-NEXT: v_rcp_f32_e32 v9, v8
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; SI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
; SI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
; SI-NEXT: v_mul_f32_e32 v10, v5, v9
|
|
; SI-NEXT: v_fma_f32 v11, -v8, v10, v5
|
|
; SI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
; SI-NEXT: v_fma_f32 v5, -v8, v10, v5
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; SI-NEXT: v_div_fmas_f32 v5, v5, v9, v10
|
|
; SI-NEXT: v_div_fixup_f32 v5, v5, v7, v4
|
|
; SI-NEXT: v_trunc_f32_e32 v5, v5
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
|
|
; SI-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; SI-NEXT: v_div_scale_f32 v4, vcc, v3, v0, v3
|
|
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v0, v0, v3
|
|
; SI-NEXT: v_rcp_f32_e32 v7, v5
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; SI-NEXT: v_fma_f32 v8, -v5, v7, 1.0
|
|
; SI-NEXT: v_fma_f32 v7, v8, v7, v7
|
|
; SI-NEXT: v_mul_f32_e32 v8, v4, v7
|
|
; SI-NEXT: v_fma_f32 v9, -v5, v8, v4
|
|
; SI-NEXT: v_fma_f32 v8, v9, v7, v8
|
|
; SI-NEXT: v_fma_f32 v4, -v5, v8, v4
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; SI-NEXT: v_div_fmas_f32 v4, v4, v7, v8
|
|
; SI-NEXT: v_div_fixup_f32 v4, v4, v0, v3
|
|
; SI-NEXT: v_trunc_f32_e32 v4, v4
|
|
; SI-NEXT: v_fma_f32 v0, -v4, v0, v3
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
; SI-NEXT: v_div_scale_f32 v3, vcc, v2, v6, v2
|
|
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v6, v6, v2
|
|
; SI-NEXT: v_rcp_f32_e32 v5, v4
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; SI-NEXT: v_fma_f32 v7, -v4, v5, 1.0
|
|
; SI-NEXT: v_fma_f32 v5, v7, v5, v5
|
|
; SI-NEXT: v_mul_f32_e32 v7, v3, v5
|
|
; SI-NEXT: v_fma_f32 v8, -v4, v7, v3
|
|
; SI-NEXT: v_fma_f32 v7, v8, v5, v7
|
|
; SI-NEXT: v_fma_f32 v3, -v4, v7, v3
|
|
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; SI-NEXT: v_div_fmas_f32 v3, v3, v5, v7
|
|
; SI-NEXT: v_div_fixup_f32 v3, v3, v6, v2
|
|
; SI-NEXT: v_trunc_f32_e32 v3, v3
|
|
; SI-NEXT: v_fma_f32 v2, -v3, v6, v2
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
|
|
; SI-NEXT: v_or_b32_e32 v0, v2, v0
|
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; CI-LABEL: frem_v4f16:
|
|
; CI: ; %bb.0:
|
|
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
; CI-NEXT: s_mov_b32 s3, 0xf000
|
|
; CI-NEXT: s_mov_b32 s2, -1
|
|
; CI-NEXT: s_mov_b32 s10, s2
|
|
; CI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT: s_mov_b32 s0, s4
|
|
; CI-NEXT: s_mov_b32 s1, s5
|
|
; CI-NEXT: s_mov_b32 s4, s6
|
|
; CI-NEXT: s_mov_b32 s5, s7
|
|
; CI-NEXT: s_mov_b32 s6, s2
|
|
; CI-NEXT: s_mov_b32 s7, s3
|
|
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
|
|
; CI-NEXT: s_mov_b32 s11, s3
|
|
; CI-NEXT: s_mov_b32 s6, 3
|
|
; CI-NEXT: s_mov_b32 s7, 0
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v2, v0
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v3, v0
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v4, v1
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v5, v0
|
|
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0 offset:32
|
|
; CI-NEXT: s_waitcnt vmcnt(0)
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v7, v1
|
|
; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v6, v0
|
|
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
|
|
; CI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
; CI-NEXT: v_div_scale_f32 v9, s[4:5], v1, v1, v5
|
|
; CI-NEXT: v_div_scale_f32 v8, vcc, v5, v1, v5
|
|
; CI-NEXT: v_rcp_f32_e32 v10, v9
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; CI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
|
|
; CI-NEXT: v_fma_f32 v10, v11, v10, v10
|
|
; CI-NEXT: v_mul_f32_e32 v11, v8, v10
|
|
; CI-NEXT: v_fma_f32 v12, -v9, v11, v8
|
|
; CI-NEXT: v_fma_f32 v11, v12, v10, v11
|
|
; CI-NEXT: v_fma_f32 v8, -v9, v11, v8
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; CI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
|
|
; CI-NEXT: v_div_fixup_f32 v8, v8, v1, v5
|
|
; CI-NEXT: v_trunc_f32_e32 v8, v8
|
|
; CI-NEXT: v_fma_f32 v1, -v8, v1, v5
|
|
; CI-NEXT: v_div_scale_f32 v8, s[4:5], v7, v7, v4
|
|
; CI-NEXT: v_div_scale_f32 v5, vcc, v4, v7, v4
|
|
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
; CI-NEXT: v_cvt_f16_f32_e32 v1, v1
|
|
; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; CI-NEXT: v_rcp_f32_e32 v9, v8
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
|
|
; CI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
|
|
; CI-NEXT: v_fma_f32 v9, v10, v9, v9
|
|
; CI-NEXT: v_mul_f32_e32 v10, v5, v9
|
|
; CI-NEXT: v_fma_f32 v11, -v8, v10, v5
|
|
; CI-NEXT: v_fma_f32 v10, v11, v9, v10
|
|
; CI-NEXT: v_fma_f32 v5, -v8, v10, v5
|
|
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
|
|
; CI-NEXT: v_div_fmas_f32 v5, v5, v9, v10
|
|
; CI-NEXT: v_div_fixup_f32 v5, v5, v7, v4
; CI-NEXT: v_trunc_f32_e32 v5, v5
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v0, v0, v3
; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
; CI-NEXT: v_or_b32_e32 v1, v4, v1
; CI-NEXT: v_div_scale_f32 v4, vcc, v3, v0, v3
; CI-NEXT: v_rcp_f32_e32 v7, v5
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v8, -v5, v7, 1.0
; CI-NEXT: v_fma_f32 v7, v8, v7, v7
; CI-NEXT: v_mul_f32_e32 v8, v4, v7
; CI-NEXT: v_fma_f32 v9, -v5, v8, v4
; CI-NEXT: v_fma_f32 v8, v9, v7, v8
; CI-NEXT: v_fma_f32 v4, -v5, v8, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v4, v4, v7, v8
; CI-NEXT: v_div_fixup_f32 v4, v4, v0, v3
; CI-NEXT: v_trunc_f32_e32 v4, v4
; CI-NEXT: v_fma_f32 v0, -v4, v0, v3
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v6, v6, v2
; CI-NEXT: v_div_scale_f32 v3, vcc, v2, v6, v2
; CI-NEXT: v_cvt_f16_f32_e32 v0, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; CI-NEXT: v_rcp_f32_e32 v5, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v7, -v4, v5, 1.0
; CI-NEXT: v_fma_f32 v5, v7, v5, v5
; CI-NEXT: v_mul_f32_e32 v7, v3, v5
; CI-NEXT: v_fma_f32 v8, -v4, v7, v3
; CI-NEXT: v_fma_f32 v7, v8, v5, v7
; CI-NEXT: v_fma_f32 v3, -v4, v7, v3
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v3, v3, v5, v7
; CI-NEXT: v_div_fixup_f32 v3, v3, v6, v2
; CI-NEXT: v_trunc_f32_e32 v3, v3
; CI-NEXT: v_fma_f32 v2, -v3, v6, v2
; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
; CI-NEXT: v_or_b32_e32 v0, v2, v0
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v5
; VI-NEXT: v_cvt_f32_f16_e32 v9, v8
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
; VI-NEXT: v_cvt_f32_f16_e32 v7, v6
; VI-NEXT: v_rcp_f32_e32 v9, v9
; VI-NEXT: v_mul_f32_e32 v7, v7, v9
; VI-NEXT: v_cvt_f16_f32_e32 v7, v7
; VI-NEXT: v_div_fixup_f16 v7, v7, v8, v6
; VI-NEXT: v_trunc_f16_e32 v7, v7
; VI-NEXT: v_fma_f16 v6, -v7, v8, v6
; VI-NEXT: v_cvt_f32_f16_e32 v8, v5
; VI-NEXT: v_cvt_f32_f16_e32 v7, v3
; VI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; VI-NEXT: v_rcp_f32_e32 v8, v8
; VI-NEXT: v_mul_f32_e32 v7, v7, v8
; VI-NEXT: v_cvt_f16_f32_e32 v7, v7
; VI-NEXT: v_div_fixup_f16 v7, v7, v5, v3
; VI-NEXT: v_trunc_f16_e32 v7, v7
; VI-NEXT: v_fma_f16 v3, -v7, v5, v3
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v4
; VI-NEXT: v_cvt_f32_f16_e32 v8, v7
; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; VI-NEXT: v_or_b32_e32 v3, v3, v6
; VI-NEXT: v_cvt_f32_f16_e32 v6, v5
; VI-NEXT: v_rcp_f32_e32 v8, v8
; VI-NEXT: v_mul_f32_e32 v6, v6, v8
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
; VI-NEXT: v_div_fixup_f16 v6, v6, v7, v5
; VI-NEXT: v_trunc_f16_e32 v6, v6
; VI-NEXT: v_fma_f16 v5, -v6, v7, v5
; VI-NEXT: v_cvt_f32_f16_e32 v7, v4
; VI-NEXT: v_cvt_f32_f16_e32 v6, v2
; VI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; VI-NEXT: v_rcp_f32_e32 v7, v7
; VI-NEXT: v_mul_f32_e32 v6, v6, v7
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
; VI-NEXT: v_div_fixup_f16 v6, v6, v4, v2
; VI-NEXT: v_trunc_f16_e32 v6, v6
; VI-NEXT: v_fma_f16 v2, -v6, v4, v2
; VI-NEXT: v_or_b32_e32 v2, v2, v5
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
<4 x half> addrspace(1)* %in2) #0 {
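; <4 x half> frem is scalarized: each lane computes x - trunc(x / y) * y.
; VI uses native f16 ops (an rcp-based divide, v_div_fixup_f16, v_trunc_f16,
; v_fma_f16); SI/CI promote each half to f32 and run the full f32 division
; expansion checked above.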
  %gep2 = getelementptr <4 x half>, <4 x half> addrspace(1)* %in2, i32 4
  %r0 = load <4 x half>, <4 x half> addrspace(1)* %in1, align 16
  %r1 = load <4 x half>, <4 x half> addrspace(1)* %gep2, align 16
  %r2 = frem <4 x half> %r0, %r1
  store <4 x half> %r2, <4 x half> addrspace(1)* %out, align 16
  ret void
}

define amdgpu_kernel void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
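; Each f32 element expands to x - trunc(x / y) * y. The divide is the
; div_scale/rcp Newton-Raphson sequence; the s_setreg_b32 writes to
; HW_REG_MODE toggle the f32 denormal mode around the refinement steps,
; since these kernels flush f32 denormals by default
; ("denormal-fp-math-f32"="preserve-sign,preserve-sign").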
; SI-LABEL: frem_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[8:11], 0 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f32 v4, vcc, v1, v3, v1
; SI-NEXT: v_div_scale_f32 v5, s[4:5], v3, v3, v1
; SI-NEXT: v_rcp_f32_e32 v6, v5
; SI-NEXT: s_mov_b32 s6, 3
; SI-NEXT: s_mov_b32 s7, 0
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; SI-NEXT: v_fma_f32 v6, v7, v6, v6
; SI-NEXT: v_mul_f32_e32 v7, v4, v6
; SI-NEXT: v_fma_f32 v8, -v5, v7, v4
; SI-NEXT: v_fma_f32 v7, v8, v6, v7
; SI-NEXT: v_fma_f32 v4, -v5, v7, v4
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; SI-NEXT: v_div_fixup_f32 v4, v4, v3, v1
; SI-NEXT: v_trunc_f32_e32 v4, v4
; SI-NEXT: v_fma_f32 v1, -v4, v3, v1
; SI-NEXT: v_div_scale_f32 v3, vcc, v0, v2, v0
; SI-NEXT: v_div_scale_f32 v4, s[4:5], v2, v2, v0
; SI-NEXT: v_rcp_f32_e32 v5, v4
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; SI-NEXT: v_fma_f32 v5, v6, v5, v5
; SI-NEXT: v_mul_f32_e32 v6, v3, v5
; SI-NEXT: v_fma_f32 v7, -v4, v6, v3
; SI-NEXT: v_fma_f32 v6, v7, v5, v6
; SI-NEXT: v_fma_f32 v3, -v4, v6, v3
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; SI-NEXT: v_div_fixup_f32 v3, v3, v2, v0
; SI-NEXT: v_trunc_f32_e32 v3, v3
; SI-NEXT: v_fma_f32 v0, -v3, v2, v0
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_v2f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
; CI-NEXT: s_mov_b32 s10, s2
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s0, s4
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
; CI-NEXT: s_mov_b32 s11, s3
; CI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx2 v[2:3], off, s[8:11], 0 offset:32
; CI-NEXT: s_mov_b32 s6, 3
; CI-NEXT: s_mov_b32 s7, 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f32 v5, s[4:5], v3, v3, v1
; CI-NEXT: v_div_scale_f32 v4, vcc, v1, v3, v1
; CI-NEXT: v_rcp_f32_e32 v6, v5
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v7, -v5, v6, 1.0
; CI-NEXT: v_fma_f32 v6, v7, v6, v6
; CI-NEXT: v_mul_f32_e32 v7, v4, v6
; CI-NEXT: v_fma_f32 v8, -v5, v7, v4
; CI-NEXT: v_fma_f32 v7, v8, v6, v7
; CI-NEXT: v_fma_f32 v4, -v5, v7, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; CI-NEXT: v_div_fixup_f32 v4, v4, v3, v1
; CI-NEXT: v_trunc_f32_e32 v4, v4
; CI-NEXT: v_fma_f32 v1, -v4, v3, v1
; CI-NEXT: v_div_scale_f32 v4, s[4:5], v2, v2, v0
; CI-NEXT: v_div_scale_f32 v3, vcc, v0, v2, v0
; CI-NEXT: v_rcp_f32_e32 v5, v4
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v6, -v4, v5, 1.0
; CI-NEXT: v_fma_f32 v5, v6, v5, v5
; CI-NEXT: v_mul_f32_e32 v6, v3, v5
; CI-NEXT: v_fma_f32 v7, -v4, v6, v3
; CI-NEXT: v_fma_f32 v6, v7, v5, v6
; CI-NEXT: v_fma_f32 v3, -v4, v6, v3
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; CI-NEXT: v_div_fixup_f32 v3, v3, v2, v0
; CI-NEXT: v_trunc_f32_e32 v3, v3
; CI-NEXT: v_fma_f32 v0, -v3, v2, v0
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v2f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_mov_b32 s2, 3
; VI-NEXT: s_mov_b32 s3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f32 v7, s[0:1], v5, v5, v3
; VI-NEXT: v_div_scale_f32 v6, vcc, v3, v5, v3
; VI-NEXT: v_rcp_f32_e32 v8, v7
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
; VI-NEXT: v_fma_f32 v8, v9, v8, v8
; VI-NEXT: v_mul_f32_e32 v9, v6, v8
; VI-NEXT: v_fma_f32 v10, -v7, v9, v6
; VI-NEXT: v_fma_f32 v9, v10, v8, v9
; VI-NEXT: v_fma_f32 v6, -v7, v9, v6
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
; VI-NEXT: v_div_fixup_f32 v6, v6, v5, v3
; VI-NEXT: v_trunc_f32_e32 v6, v6
; VI-NEXT: v_fma_f32 v3, -v6, v5, v3
; VI-NEXT: v_div_scale_f32 v6, s[0:1], v4, v4, v2
; VI-NEXT: v_div_scale_f32 v5, vcc, v2, v4, v2
; VI-NEXT: v_rcp_f32_e32 v7, v6
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
; VI-NEXT: v_fma_f32 v7, v8, v7, v7
; VI-NEXT: v_mul_f32_e32 v8, v5, v7
; VI-NEXT: v_fma_f32 v9, -v6, v8, v5
; VI-NEXT: v_fma_f32 v8, v9, v7, v8
; VI-NEXT: v_fma_f32 v5, -v6, v8, v5
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
; VI-NEXT: v_div_fixup_f32 v5, v5, v4, v2
; VI-NEXT: v_trunc_f32_e32 v5, v5
; VI-NEXT: v_fma_f32 v2, -v5, v4, v2
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
<2 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
  %r0 = load <2 x float>, <2 x float> addrspace(1)* %in1, align 8
  %r1 = load <2 x float>, <2 x float> addrspace(1)* %gep2, align 8
  %r2 = frem <2 x float> %r0, %r1
  store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
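; Same per-element expansion as frem_v2f32, repeated for four lanes; only the
; dwordx4 load/store width and register assignment differ.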
; SI-LABEL: frem_v4f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_mov_b32 s10, s2
; SI-NEXT: s_mov_b32 s11, s3
; SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f32 v8, vcc, v3, v7, v3
; SI-NEXT: v_div_scale_f32 v9, s[4:5], v7, v7, v3
; SI-NEXT: v_rcp_f32_e32 v10, v9
; SI-NEXT: s_mov_b32 s6, 3
; SI-NEXT: s_mov_b32 s7, 0
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
; SI-NEXT: v_fma_f32 v10, v11, v10, v10
; SI-NEXT: v_mul_f32_e32 v11, v8, v10
; SI-NEXT: v_fma_f32 v12, -v9, v11, v8
; SI-NEXT: v_fma_f32 v11, v12, v10, v11
; SI-NEXT: v_fma_f32 v8, -v9, v11, v8
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
; SI-NEXT: v_div_fixup_f32 v8, v8, v7, v3
; SI-NEXT: v_trunc_f32_e32 v8, v8
; SI-NEXT: v_fma_f32 v3, -v8, v7, v3
; SI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
; SI-NEXT: v_div_scale_f32 v8, s[4:5], v6, v6, v2
; SI-NEXT: v_rcp_f32_e32 v9, v8
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
; SI-NEXT: v_fma_f32 v9, v10, v9, v9
; SI-NEXT: v_mul_f32_e32 v10, v7, v9
; SI-NEXT: v_fma_f32 v11, -v8, v10, v7
; SI-NEXT: v_fma_f32 v10, v11, v9, v10
; SI-NEXT: v_fma_f32 v7, -v8, v10, v7
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v7, v7, v9, v10
; SI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
; SI-NEXT: v_trunc_f32_e32 v7, v7
; SI-NEXT: v_fma_f32 v2, -v7, v6, v2
; SI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
; SI-NEXT: v_div_scale_f32 v7, s[4:5], v5, v5, v1
; SI-NEXT: v_rcp_f32_e32 v8, v7
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
; SI-NEXT: v_fma_f32 v8, v9, v8, v8
; SI-NEXT: v_mul_f32_e32 v9, v6, v8
; SI-NEXT: v_fma_f32 v10, -v7, v9, v6
; SI-NEXT: v_fma_f32 v9, v10, v8, v9
; SI-NEXT: v_fma_f32 v6, -v7, v9, v6
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
; SI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
; SI-NEXT: v_trunc_f32_e32 v6, v6
; SI-NEXT: v_fma_f32 v1, -v6, v5, v1
; SI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
; SI-NEXT: v_div_scale_f32 v6, s[4:5], v4, v4, v0
; SI-NEXT: v_rcp_f32_e32 v7, v6
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; SI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
; SI-NEXT: v_fma_f32 v7, v8, v7, v7
; SI-NEXT: v_mul_f32_e32 v8, v5, v7
; SI-NEXT: v_fma_f32 v9, -v6, v8, v5
; SI-NEXT: v_fma_f32 v8, v9, v7, v8
; SI-NEXT: v_fma_f32 v5, -v6, v8, v5
; SI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; SI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
; SI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
; SI-NEXT: v_trunc_f32_e32 v5, v5
; SI-NEXT: v_fma_f32 v0, -v5, v4, v0
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_v4f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
; CI-NEXT: s_mov_b32 s10, s2
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s0, s4
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
; CI-NEXT: s_mov_b32 s11, s3
; CI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
; CI-NEXT: s_mov_b32 s6, 3
; CI-NEXT: s_mov_b32 s7, 0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f32 v9, s[4:5], v7, v7, v3
; CI-NEXT: v_div_scale_f32 v8, vcc, v3, v7, v3
; CI-NEXT: v_rcp_f32_e32 v10, v9
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v11, -v9, v10, 1.0
; CI-NEXT: v_fma_f32 v10, v11, v10, v10
; CI-NEXT: v_mul_f32_e32 v11, v8, v10
; CI-NEXT: v_fma_f32 v12, -v9, v11, v8
; CI-NEXT: v_fma_f32 v11, v12, v10, v11
; CI-NEXT: v_fma_f32 v8, -v9, v11, v8
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v8, v8, v10, v11
; CI-NEXT: v_div_fixup_f32 v8, v8, v7, v3
; CI-NEXT: v_trunc_f32_e32 v8, v8
; CI-NEXT: v_fma_f32 v3, -v8, v7, v3
; CI-NEXT: v_div_scale_f32 v8, s[4:5], v6, v6, v2
; CI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
; CI-NEXT: v_rcp_f32_e32 v9, v8
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v10, -v8, v9, 1.0
; CI-NEXT: v_fma_f32 v9, v10, v9, v9
; CI-NEXT: v_mul_f32_e32 v10, v7, v9
; CI-NEXT: v_fma_f32 v11, -v8, v10, v7
; CI-NEXT: v_fma_f32 v10, v11, v9, v10
; CI-NEXT: v_fma_f32 v7, -v8, v10, v7
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v7, v7, v9, v10
; CI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
; CI-NEXT: v_trunc_f32_e32 v7, v7
; CI-NEXT: v_fma_f32 v2, -v7, v6, v2
; CI-NEXT: v_div_scale_f32 v7, s[4:5], v5, v5, v1
; CI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
; CI-NEXT: v_rcp_f32_e32 v8, v7
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v9, -v7, v8, 1.0
; CI-NEXT: v_fma_f32 v8, v9, v8, v8
; CI-NEXT: v_mul_f32_e32 v9, v6, v8
; CI-NEXT: v_fma_f32 v10, -v7, v9, v6
; CI-NEXT: v_fma_f32 v9, v10, v8, v9
; CI-NEXT: v_fma_f32 v6, -v7, v9, v6
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v6, v6, v8, v9
; CI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
; CI-NEXT: v_trunc_f32_e32 v6, v6
; CI-NEXT: v_fma_f32 v1, -v6, v5, v1
; CI-NEXT: v_div_scale_f32 v6, s[4:5], v4, v4, v0
; CI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
; CI-NEXT: v_rcp_f32_e32 v7, v6
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s6
; CI-NEXT: v_fma_f32 v8, -v6, v7, 1.0
; CI-NEXT: v_fma_f32 v7, v8, v7, v7
; CI-NEXT: v_mul_f32_e32 v8, v5, v7
; CI-NEXT: v_fma_f32 v9, -v6, v8, v5
; CI-NEXT: v_fma_f32 v8, v9, v7, v8
; CI-NEXT: v_fma_f32 v5, -v6, v8, v5
; CI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s7
; CI-NEXT: v_div_fmas_f32 v5, v5, v7, v8
; CI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
; CI-NEXT: v_trunc_f32_e32 v5, v5
; CI-NEXT: v_fma_f32 v0, -v5, v4, v0
; CI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v4f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_mov_b32 s2, 3
; VI-NEXT: s_mov_b32 s3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_add_u32 s0, s0, 64
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: v_mov_b32_e32 v8, s4
; VI-NEXT: v_mov_b32_e32 v9, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f32 v11, s[0:1], v7, v7, v3
; VI-NEXT: v_div_scale_f32 v10, vcc, v3, v7, v3
; VI-NEXT: v_rcp_f32_e32 v12, v11
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v13, -v11, v12, 1.0
; VI-NEXT: v_fma_f32 v12, v13, v12, v12
; VI-NEXT: v_mul_f32_e32 v13, v10, v12
; VI-NEXT: v_fma_f32 v14, -v11, v13, v10
; VI-NEXT: v_fma_f32 v13, v14, v12, v13
; VI-NEXT: v_fma_f32 v10, -v11, v13, v10
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v10, v10, v12, v13
; VI-NEXT: v_div_fixup_f32 v10, v10, v7, v3
; VI-NEXT: v_trunc_f32_e32 v10, v10
; VI-NEXT: v_fma_f32 v3, -v10, v7, v3
; VI-NEXT: v_div_scale_f32 v10, s[0:1], v6, v6, v2
; VI-NEXT: v_div_scale_f32 v7, vcc, v2, v6, v2
; VI-NEXT: v_rcp_f32_e32 v11, v10
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v12, -v10, v11, 1.0
; VI-NEXT: v_fma_f32 v11, v12, v11, v11
; VI-NEXT: v_mul_f32_e32 v12, v7, v11
; VI-NEXT: v_fma_f32 v13, -v10, v12, v7
; VI-NEXT: v_fma_f32 v12, v13, v11, v12
; VI-NEXT: v_fma_f32 v7, -v10, v12, v7
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v7, v7, v11, v12
; VI-NEXT: v_div_fixup_f32 v7, v7, v6, v2
; VI-NEXT: v_trunc_f32_e32 v7, v7
; VI-NEXT: v_fma_f32 v2, -v7, v6, v2
; VI-NEXT: v_div_scale_f32 v7, s[0:1], v5, v5, v1
; VI-NEXT: v_div_scale_f32 v6, vcc, v1, v5, v1
; VI-NEXT: v_rcp_f32_e32 v10, v7
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v11, -v7, v10, 1.0
; VI-NEXT: v_fma_f32 v10, v11, v10, v10
; VI-NEXT: v_mul_f32_e32 v11, v6, v10
; VI-NEXT: v_fma_f32 v12, -v7, v11, v6
; VI-NEXT: v_fma_f32 v11, v12, v10, v11
; VI-NEXT: v_fma_f32 v6, -v7, v11, v6
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v6, v6, v10, v11
; VI-NEXT: v_div_fixup_f32 v6, v6, v5, v1
; VI-NEXT: v_trunc_f32_e32 v6, v6
; VI-NEXT: v_fma_f32 v1, -v6, v5, v1
; VI-NEXT: v_div_scale_f32 v6, s[0:1], v4, v4, v0
; VI-NEXT: v_div_scale_f32 v5, vcc, v0, v4, v0
; VI-NEXT: v_rcp_f32_e32 v7, v6
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s2
; VI-NEXT: v_fma_f32 v10, -v6, v7, 1.0
; VI-NEXT: v_fma_f32 v7, v10, v7, v7
; VI-NEXT: v_mul_f32_e32 v10, v5, v7
; VI-NEXT: v_fma_f32 v11, -v6, v10, v5
; VI-NEXT: v_fma_f32 v10, v11, v7, v10
; VI-NEXT: v_fma_f32 v5, -v6, v10, v5
; VI-NEXT: s_setreg_b32 hwreg(HW_REG_MODE, 4, 2), s3
; VI-NEXT: v_div_fmas_f32 v5, v5, v7, v10
; VI-NEXT: v_div_fixup_f32 v5, v5, v4, v0
; VI-NEXT: v_trunc_f32_e32 v5, v5
; VI-NEXT: v_fma_f32 v0, -v5, v4, v0
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; VI-NEXT: s_endpgm
<4 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
  %r0 = load <4 x float>, <4 x float> addrspace(1)* %in1, align 16
  %r1 = load <4 x float>, <4 x float> addrspace(1)* %gep2, align 16
  %r2 = frem <4 x float> %r0, %r1
  store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
  ret void
}

define amdgpu_kernel void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
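; f64 frem follows the same quotient/trunc/fma pattern. CI and VI can use
; v_trunc_f64 directly; SI has no v_trunc_f64, so the truncation is open-coded
; by masking fraction bits below the exponent (v_bfe_u32 / v_lshr_b64 /
; v_not_b32 / v_and_b32 / v_cndmask).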
; SI-LABEL: frem_v2f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s8
; SI-NEXT: s_mov_b32 s5, s9
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s11
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; SI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_div_scale_f64 v[8:9], s[0:1], v[6:7], v[6:7], v[2:3]
; SI-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
; SI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
; SI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; SI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
; SI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; SI-NEXT: v_div_scale_f64 v[12:13], s[0:1], v[2:3], v[6:7], v[2:3]
; SI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
; SI-NEXT: v_fma_f64 v[16:17], -v[8:9], v[14:15], v[12:13]
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v7, v9
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v3, v13
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
; SI-NEXT: s_nop 1
; SI-NEXT: v_div_fmas_f64 v[8:9], v[16:17], v[10:11], v[14:15]
; SI-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
; SI-NEXT: v_bfe_u32 v10, v9, 20, 11
; SI-NEXT: s_movk_i32 s8, 0xfc01
; SI-NEXT: v_add_i32_e32 v12, vcc, s8, v10
; SI-NEXT: s_mov_b32 s3, 0xfffff
; SI-NEXT: v_lshr_b64 v[10:11], s[2:3], v12
; SI-NEXT: v_not_b32_e32 v10, v10
; SI-NEXT: v_and_b32_e32 v10, v8, v10
; SI-NEXT: v_not_b32_e32 v11, v11
; SI-NEXT: v_and_b32_e32 v11, v9, v11
; SI-NEXT: s_brev_b32 s9, 1
; SI-NEXT: v_and_b32_e32 v13, s9, v9
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v12
; SI-NEXT: v_cndmask_b32_e32 v11, v11, v13, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v12
; SI-NEXT: v_cndmask_b32_e64 v9, v11, v9, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v10, v10, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v8, v10, v8, s[0:1]
; SI-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
; SI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[0:1]
; SI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
; SI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; SI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; SI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; SI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; SI-NEXT: v_div_scale_f64 v[10:11], s[0:1], v[0:1], v[4:5], v[0:1]
; SI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
; SI-NEXT: v_fma_f64 v[14:15], -v[6:7], v[12:13], v[10:11]
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v7
; SI-NEXT: v_cmp_eq_u32_e64 s[0:1], v1, v11
; SI-NEXT: s_xor_b64 vcc, s[0:1], vcc
; SI-NEXT: s_nop 1
; SI-NEXT: v_div_fmas_f64 v[6:7], v[14:15], v[8:9], v[12:13]
; SI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
; SI-NEXT: v_bfe_u32 v8, v7, 20, 11
; SI-NEXT: v_add_i32_e32 v10, vcc, s8, v8
; SI-NEXT: v_lshr_b64 v[8:9], s[2:3], v10
; SI-NEXT: v_not_b32_e32 v8, v8
; SI-NEXT: v_and_b32_e32 v8, v6, v8
; SI-NEXT: v_not_b32_e32 v9, v9
; SI-NEXT: v_and_b32_e32 v9, v7, v9
; SI-NEXT: v_and_b32_e32 v11, s9, v7
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v10
; SI-NEXT: v_cndmask_b32_e32 v9, v9, v11, vcc
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], 51, v10
; SI-NEXT: v_cndmask_b32_e64 v7, v9, v7, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v8, v8, 0, vcc
; SI-NEXT: v_cndmask_b32_e64 v6, v8, v6, s[0:1]
; SI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; CI-LABEL: frem_v2f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
; CI-NEXT: s_mov_b32 s10, s2
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_mov_b32 s0, s4
; CI-NEXT: s_mov_b32 s1, s5
; CI-NEXT: s_mov_b32 s4, s6
; CI-NEXT: s_mov_b32 s5, s7
; CI-NEXT: s_mov_b32 s6, s2
; CI-NEXT: s_mov_b32 s7, s3
; CI-NEXT: s_mov_b32 s11, s3
; CI-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
; CI-NEXT: buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:64
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_div_scale_f64 v[8:9], s[4:5], v[6:7], v[6:7], v[2:3]
; CI-NEXT: v_rcp_f64_e32 v[10:11], v[8:9]
; CI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
; CI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; CI-NEXT: v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
; CI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; CI-NEXT: v_div_scale_f64 v[12:13], vcc, v[2:3], v[6:7], v[2:3]
; CI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
; CI-NEXT: v_fma_f64 v[8:9], -v[8:9], v[14:15], v[12:13]
; CI-NEXT: s_nop 1
; CI-NEXT: v_div_fmas_f64 v[8:9], v[8:9], v[10:11], v[14:15]
; CI-NEXT: v_div_fixup_f64 v[8:9], v[8:9], v[6:7], v[2:3]
; CI-NEXT: v_trunc_f64_e32 v[8:9], v[8:9]
; CI-NEXT: v_fma_f64 v[2:3], -v[8:9], v[6:7], v[2:3]
; CI-NEXT: v_div_scale_f64 v[6:7], s[4:5], v[4:5], v[4:5], v[0:1]
; CI-NEXT: v_rcp_f64_e32 v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; CI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; CI-NEXT: v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
; CI-NEXT: v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
; CI-NEXT: v_div_scale_f64 v[10:11], vcc, v[0:1], v[4:5], v[0:1]
; CI-NEXT: v_mul_f64 v[12:13], v[10:11], v[8:9]
; CI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
; CI-NEXT: s_nop 1
; CI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
; CI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
; CI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; CI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
; CI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v2f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_add_u32 s0, s0, 64
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: v_mov_b32_e32 v8, s4
; VI-NEXT: v_mov_b32_e32 v9, s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_div_scale_f64 v[10:11], s[0:1], v[6:7], v[6:7], v[2:3]
; VI-NEXT: v_rcp_f64_e32 v[12:13], v[10:11]
; VI-NEXT: v_fma_f64 v[14:15], -v[10:11], v[12:13], 1.0
; VI-NEXT: v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
; VI-NEXT: v_fma_f64 v[14:15], -v[10:11], v[12:13], 1.0
; VI-NEXT: v_fma_f64 v[12:13], v[12:13], v[14:15], v[12:13]
; VI-NEXT: v_div_scale_f64 v[14:15], vcc, v[2:3], v[6:7], v[2:3]
; VI-NEXT: v_mul_f64 v[16:17], v[14:15], v[12:13]
; VI-NEXT: v_fma_f64 v[10:11], -v[10:11], v[16:17], v[14:15]
; VI-NEXT: s_nop 1
; VI-NEXT: v_div_fmas_f64 v[10:11], v[10:11], v[12:13], v[16:17]
; VI-NEXT: v_div_fixup_f64 v[10:11], v[10:11], v[6:7], v[2:3]
; VI-NEXT: v_trunc_f64_e32 v[10:11], v[10:11]
; VI-NEXT: v_fma_f64 v[2:3], -v[10:11], v[6:7], v[2:3]
; VI-NEXT: v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[0:1]
; VI-NEXT: v_rcp_f64_e32 v[10:11], v[6:7]
; VI-NEXT: v_fma_f64 v[12:13], -v[6:7], v[10:11], 1.0
; VI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; VI-NEXT: v_fma_f64 v[12:13], -v[6:7], v[10:11], 1.0
; VI-NEXT: v_fma_f64 v[10:11], v[10:11], v[12:13], v[10:11]
; VI-NEXT: v_div_scale_f64 v[12:13], vcc, v[0:1], v[4:5], v[0:1]
; VI-NEXT: v_mul_f64 v[14:15], v[12:13], v[10:11]
; VI-NEXT: v_fma_f64 v[6:7], -v[6:7], v[14:15], v[12:13]
; VI-NEXT: s_nop 1
; VI-NEXT: v_div_fmas_f64 v[6:7], v[6:7], v[10:11], v[14:15]
; VI-NEXT: v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[0:1]
; VI-NEXT: v_trunc_f64_e32 v[6:7], v[6:7]
; VI-NEXT: v_fma_f64 v[0:1], -v[6:7], v[4:5], v[0:1]
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; VI-NEXT: s_endpgm
<2 x double> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
  %r0 = load <2 x double>, <2 x double> addrspace(1)* %in1, align 16
  %r1 = load <2 x double>, <2 x double> addrspace(1)* %gep2, align 16
  %r2 = frem <2 x double> %r0, %r1
  store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
  ret void
}

attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }