llvm-mirror/test/CodeGen/RISCV/calling-conv-half.ll
Commit 6e55b005fb by Craig Topper: [RISCV] Improve 64-bit integer constant materialization for more cases.
For positive constants we try shifting left to remove leading zeros and
filling the bottom bits with 1s. We then materialize that constant and
shift it right.

This patch adds a new strategy to try filling the bottom bits with
zeros instead. This catches some additional cases.
2021-04-02 10:18:08 -07:00
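
As a concrete example from this file, the RV64IF checks for
caller_half_on_stack materialize 0xffff4900 (the value stored to the stack
when passing half 10.0), which appears to use the zero-fill idea described
above: shifting the constant left by 32 strips its 32 leading zeros and
leaves 0xffff490000000000, which is just -183 shifted left by 40, so the
whole constant takes three instructions:

  addi a0, zero, -183   # a0 = 0xffffffffffffff49
  slli a0, a0, 40       # a0 = 0xffff490000000000
  srli t0, a0, 32       # t0 = 0x00000000ffff4900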

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32IF
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64IF
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV32-ILP32F
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s | FileCheck %s -check-prefix=RV64-LP64F
; Tests passing half arguments and returns without Zfh.
; Covers with and without the F extension and the ilp32f/lp64f
; calling conventions.
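;
; The checks below show that, without Zfh, a half argument is passed as its
; 16-bit bit pattern in an integer register or stack slot for the ilp32/lp64
; ABIs, and in an FP argument register for ilp32f/lp64f; callees widen it to
; float with the __gnu_h2f_ieee libcall before using it.
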
define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV32I-LABEL: callee_half_in_regs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a1, a0
; RV32I-NEXT: call __gnu_h2f_ieee@plt
; RV32I-NEXT: call __fixsfsi@plt
; RV32I-NEXT: add a0, s0, a0
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_half_in_regs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw a0, a0, -1
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: call __gnu_h2f_ieee@plt
; RV64I-NEXT: call __fixsfdi@plt
; RV64I-NEXT: addw a0, s0, a0
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32IF-LABEL: callee_half_in_regs:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: mv s0, a0
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: call __gnu_h2f_ieee@plt
; RV32IF-NEXT: fmv.w.x ft0, a0
; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT: add a0, s0, a0
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: callee_half_in_regs:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: mv s0, a0
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: call __gnu_h2f_ieee@plt
; RV64IF-NEXT: fmv.w.x ft0, a0
; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT: addw a0, s0, a0
; RV64IF-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: callee_half_in_regs:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: mv s0, a0
; RV32-ILP32F-NEXT: fmv.x.w a0, fa0
; RV32-ILP32F-NEXT: call __gnu_h2f_ieee@plt
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: add a0, s0, a0
; RV32-ILP32F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: callee_half_in_regs:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: mv s0, a0
; RV64-LP64F-NEXT: fmv.x.w a0, fa0
; RV64-LP64F-NEXT: call __gnu_h2f_ieee@plt
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: addw a0, s0, a0
; RV64-LP64F-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
%b_fptosi = fptosi half %b to i32
%1 = add i32 %a, %b_fptosi
ret i32 %1
}
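
; The caller materializes half 2.0 (bit pattern 0x4000) either as an integer
; immediate or via a constant-pool load, depending on the configuration; with
; ilp32f/lp64f it is passed in fa0.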
define i32 @caller_half_in_regs() nounwind {
; RV32I-LABEL: caller_half_in_regs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a0, zero, 1
; RV32I-NEXT: lui a1, 4
; RV32I-NEXT: call callee_half_in_regs@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: caller_half_in_regs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a0, zero, 1
; RV64I-NEXT: lui a1, 4
; RV64I-NEXT: call callee_half_in_regs@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32IF-LABEL: caller_half_in_regs:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: addi a0, zero, 1
; RV32IF-NEXT: lui a1, 1048564
; RV32IF-NEXT: call callee_half_in_regs@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: caller_half_in_regs:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: lui a0, %hi(.LCPI1_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
; RV64IF-NEXT: fmv.x.w a1, ft0
; RV64IF-NEXT: addi a0, zero, 1
; RV64IF-NEXT: call callee_half_in_regs@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: caller_half_in_regs:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI1_0)
; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI1_0)(a0)
; RV32-ILP32F-NEXT: addi a0, zero, 1
; RV32-ILP32F-NEXT: call callee_half_in_regs@plt
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: caller_half_in_regs:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: lui a0, %hi(.LCPI1_0)
; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI1_0)(a0)
; RV64-LP64F-NEXT: addi a0, zero, 1
; RV64-LP64F-NEXT: call callee_half_in_regs@plt
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
%1 = call i32 @callee_half_in_regs(i32 1, half 2.0)
ret i32 %1
}
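
; The ninth argument (half %i) does not fit in the eight integer argument
; registers, so for ilp32/lp64 it arrives on the stack and is loaded with lhu;
; with ilp32f/lp64f it is still passed in fa0, since the FP argument registers
; are free.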
define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, half %i) nounwind {
; RV32I-LABEL: callee_half_on_stack:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: lhu a0, 16(sp)
; RV32I-NEXT: mv s0, a7
; RV32I-NEXT: call __gnu_h2f_ieee@plt
; RV32I-NEXT: call __fixsfsi@plt
; RV32I-NEXT: add a0, s0, a0
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_half_on_stack:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: lhu a0, 16(sp)
; RV64I-NEXT: mv s0, a7
; RV64I-NEXT: call __gnu_h2f_ieee@plt
; RV64I-NEXT: call __fixsfdi@plt
; RV64I-NEXT: addw a0, s0, a0
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32IF-LABEL: callee_half_on_stack:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: lhu a0, 16(sp)
; RV32IF-NEXT: mv s0, a7
; RV32IF-NEXT: call __gnu_h2f_ieee@plt
; RV32IF-NEXT: fmv.w.x ft0, a0
; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT: add a0, s0, a0
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: callee_half_on_stack:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: lhu a0, 16(sp)
; RV64IF-NEXT: mv s0, a7
; RV64IF-NEXT: call __gnu_h2f_ieee@plt
; RV64IF-NEXT: fmv.w.x ft0, a0
; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT: addw a0, s0, a0
; RV64IF-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: callee_half_on_stack:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: mv s0, a7
; RV32-ILP32F-NEXT: fmv.x.w a0, fa0
; RV32-ILP32F-NEXT: call __gnu_h2f_ieee@plt
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: add a0, s0, a0
; RV32-ILP32F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: callee_half_on_stack:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: mv s0, a7
; RV64-LP64F-NEXT: fmv.x.w a0, fa0
; RV64-LP64F-NEXT: call __gnu_h2f_ieee@plt
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: addw a0, s0, a0
; RV64-LP64F-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
%1 = fptosi half %i to i32
%2 = add i32 %h, %1
ret i32 %2
}
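
; The caller passes half 10.0 (bit pattern 0x4900) through the stack slot for
; ilp32/lp64 (as 0xffff4900 when the F extension is enabled; the RV64IF
; addi/slli/srli sequence is the constant materialization shown above) and in
; fa0 for ilp32f/lp64f.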
define i32 @caller_half_on_stack() nounwind {
; RV32I-LABEL: caller_half_on_stack:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a0, 5
; RV32I-NEXT: addi t0, a0, -1792
; RV32I-NEXT: addi a0, zero, 1
; RV32I-NEXT: addi a1, zero, 2
; RV32I-NEXT: addi a2, zero, 3
; RV32I-NEXT: addi a3, zero, 4
; RV32I-NEXT: addi a4, zero, 5
; RV32I-NEXT: addi a5, zero, 6
; RV32I-NEXT: addi a6, zero, 7
; RV32I-NEXT: addi a7, zero, 8
; RV32I-NEXT: sw t0, 0(sp)
; RV32I-NEXT: call callee_half_on_stack@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: caller_half_on_stack:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a0, 5
; RV64I-NEXT: addiw t0, a0, -1792
; RV64I-NEXT: addi a0, zero, 1
; RV64I-NEXT: addi a1, zero, 2
; RV64I-NEXT: addi a2, zero, 3
; RV64I-NEXT: addi a3, zero, 4
; RV64I-NEXT: addi a4, zero, 5
; RV64I-NEXT: addi a5, zero, 6
; RV64I-NEXT: addi a6, zero, 7
; RV64I-NEXT: addi a7, zero, 8
; RV64I-NEXT: sd t0, 0(sp)
; RV64I-NEXT: call callee_half_on_stack@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32IF-LABEL: caller_half_on_stack:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: lui a0, 1048565
; RV32IF-NEXT: addi t0, a0, -1792
; RV32IF-NEXT: addi a0, zero, 1
; RV32IF-NEXT: addi a1, zero, 2
; RV32IF-NEXT: addi a2, zero, 3
; RV32IF-NEXT: addi a3, zero, 4
; RV32IF-NEXT: addi a4, zero, 5
; RV32IF-NEXT: addi a5, zero, 6
; RV32IF-NEXT: addi a6, zero, 7
; RV32IF-NEXT: addi a7, zero, 8
; RV32IF-NEXT: sw t0, 0(sp)
; RV32IF-NEXT: call callee_half_on_stack@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: caller_half_on_stack:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: addi a0, zero, -183
; RV64IF-NEXT: slli a0, a0, 40
; RV64IF-NEXT: srli t0, a0, 32
; RV64IF-NEXT: addi a0, zero, 1
; RV64IF-NEXT: addi a1, zero, 2
; RV64IF-NEXT: addi a2, zero, 3
; RV64IF-NEXT: addi a3, zero, 4
; RV64IF-NEXT: addi a4, zero, 5
; RV64IF-NEXT: addi a5, zero, 6
; RV64IF-NEXT: addi a6, zero, 7
; RV64IF-NEXT: addi a7, zero, 8
; RV64IF-NEXT: sw t0, 0(sp)
; RV64IF-NEXT: call callee_half_on_stack@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: caller_half_on_stack:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI3_0)
; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI3_0)(a0)
; RV32-ILP32F-NEXT: addi a0, zero, 1
; RV32-ILP32F-NEXT: addi a1, zero, 2
; RV32-ILP32F-NEXT: addi a2, zero, 3
; RV32-ILP32F-NEXT: addi a3, zero, 4
; RV32-ILP32F-NEXT: addi a4, zero, 5
; RV32-ILP32F-NEXT: addi a5, zero, 6
; RV32-ILP32F-NEXT: addi a6, zero, 7
; RV32-ILP32F-NEXT: addi a7, zero, 8
; RV32-ILP32F-NEXT: call callee_half_on_stack@plt
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: caller_half_on_stack:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: lui a0, %hi(.LCPI3_0)
; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI3_0)(a0)
; RV64-LP64F-NEXT: addi a0, zero, 1
; RV64-LP64F-NEXT: addi a1, zero, 2
; RV64-LP64F-NEXT: addi a2, zero, 3
; RV64-LP64F-NEXT: addi a3, zero, 4
; RV64-LP64F-NEXT: addi a4, zero, 5
; RV64-LP64F-NEXT: addi a5, zero, 6
; RV64-LP64F-NEXT: addi a6, zero, 7
; RV64-LP64F-NEXT: addi a7, zero, 8
; RV64-LP64F-NEXT: call callee_half_on_stack@plt
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
%1 = call i32 @callee_half_on_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, half 10.0)
ret i32 %1
}
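
; A half return value (1.0, bit pattern 0x3c00) comes back in a0 for the
; ilp32/lp64 ABIs and in fa0 for ilp32f/lp64f.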
define half @callee_half_ret() nounwind {
; RV32I-LABEL: callee_half_ret:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a0, 4
; RV32I-NEXT: addi a0, a0, -1024
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_half_ret:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a0, 4
; RV64I-NEXT: addiw a0, a0, -1024
; RV64I-NEXT: ret
;
; RV32IF-LABEL: callee_half_ret:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 1048564
; RV32IF-NEXT: addi a0, a0, -1024
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: callee_half_ret:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI4_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI4_0)(a0)
; RV64IF-NEXT: fmv.x.w a0, ft0
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: callee_half_ret:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI4_0)
; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI4_0)(a0)
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: callee_half_ret:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: lui a0, %hi(.LCPI4_0)
; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI4_0)(a0)
; RV64-LP64F-NEXT: ret
ret half 1.0
}
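
; The caller converts the returned half to i32: plain ilp32/lp64 masks a0 to
; 16 bits before calling __gnu_h2f_ieee, the F-extension soft-ABI variants
; pass a0 through unmasked, and ilp32f/lp64f first moves the value out of fa0
; with fmv.x.w.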
define i32 @caller_half_ret() nounwind {
; RV32I-LABEL: caller_half_ret:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call callee_half_ret@plt
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: call __gnu_h2f_ieee@plt
; RV32I-NEXT: call __fixsfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: caller_half_ret:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call callee_half_ret@plt
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: call __gnu_h2f_ieee@plt
; RV64I-NEXT: call __fixsfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32IF-LABEL: caller_half_ret:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call callee_half_ret@plt
; RV32IF-NEXT: call __gnu_h2f_ieee@plt
; RV32IF-NEXT: fmv.w.x ft0, a0
; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: caller_half_ret:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call callee_half_ret@plt
; RV64IF-NEXT: call __gnu_h2f_ieee@plt
; RV64IF-NEXT: fmv.w.x ft0, a0
; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
; RV32-ILP32F-LABEL: caller_half_ret:
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: call callee_half_ret@plt
; RV32-ILP32F-NEXT: fmv.x.w a0, fa0
; RV32-ILP32F-NEXT: call __gnu_h2f_ieee@plt
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: caller_half_ret:
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: call callee_half_ret@plt
; RV64-LP64F-NEXT: fmv.x.w a0, fa0
; RV64-LP64F-NEXT: call __gnu_h2f_ieee@plt
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
%1 = call half @callee_half_ret()
%2 = fptosi half %1 to i32
ret i32 %2
}