
[NFC][Test] Update test for IEEE Long Double

QingShan Zhang 2020-11-20 09:56:53 +00:00
parent 39c3beff62
commit 95e940fbb4
3 changed files with 395 additions and 88 deletions
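As the NOTE headers below state, the CHECK lines in all three tests were regenerated with llvm/utils/update_llc_test_checks.py, replacing the older hand-written spot checks with full-body CHECK-NEXT sequences. The shared theme is fp128 (IEEE binary128): on targets without native quad-precision hardware, these operations lower to compiler-rt soft-float libcalls. A minimal sketch of that pattern in LLVM IR (a hypothetical standalone example, not part of the commit):

define fp128 @sketch_add(fp128 %a, fp128 %b) {
  ; Without quad-precision hardware this fadd becomes a libcall:
  ; "bl __addtf3" on AArch64, "bl __addkf3" on pre-POWER9 PowerPC.
  %r = fadd fp128 %a, %b
  ret fp128 %r
}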


@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm64-linux-gnu -verify-machineinstrs -mcpu=cyclone -aarch64-enable-atomic-cfg-tidy=0 < %s | FileCheck -enable-var-scope %s
@lhs = global fp128 zeroinitializer, align 16
@@ -5,53 +6,85 @@
define fp128 @test_add() {
; CHECK-LABEL: test_add:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fadd fp128 %lhs, %rhs
; CHECK: bl __addtf3
ret fp128 %val
}
define fp128 @test_sub() {
; CHECK-LABEL: test_sub:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __subtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fsub fp128 %lhs, %rhs
; CHECK: bl __subtf3
ret fp128 %val
}
define fp128 @test_mul() {
; CHECK-LABEL: test_mul:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __multf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fmul fp128 %lhs, %rhs
; CHECK: bl __multf3
ret fp128 %val
}
define fp128 @test_div() {
; CHECK-LABEL: test_div:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __divtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fdiv fp128 %lhs, %rhs
; CHECK: bl __divtf3
ret fp128 %val
}
@@ -60,163 +93,260 @@ define fp128 @test_div() {
define void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32 // =32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: bl __fixtfsi
; CHECK-NEXT: adrp x8, var32
; CHECK-NEXT: str w0, [x8, :lo12:var32]
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl __fixtfdi
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: str x0, [x8, :lo12:var64]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
%val32 = fptosi fp128 %val to i32
store i32 %val32, i32* @var32
; CHECK: bl __fixtfsi
%val64 = fptosi fp128 %val to i64
store i64 %val64, i64* @var64
; CHECK: bl __fixtfdi
ret void
}
define void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32 // =32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: bl __fixunstfsi
; CHECK-NEXT: adrp x8, var32
; CHECK-NEXT: str w0, [x8, :lo12:var32]
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl __fixunstfdi
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: str x0, [x8, :lo12:var64]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
%val32 = fptoui fp128 %val to i32
store i32 %val32, i32* @var32
; CHECK: bl __fixunstfsi
%val64 = fptoui fp128 %val to i64
store i64 %val64, i64* @var64
; CHECK: bl __fixunstfdi
ret void
}
define void @test_sitofp() {
; CHECK-LABEL: test_sitofp:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, var32
; CHECK-NEXT: ldr w0, [x8, :lo12:var32]
; CHECK-NEXT: bl __floatsitf
; CHECK-NEXT: adrp x19, lhs
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: ldr x0, [x8, :lo12:var64]
; CHECK-NEXT: bl __floatditf
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
%src32 = load i32, i32* @var32
%val32 = sitofp i32 %src32 to fp128
store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatsitf
%src64 = load i64, i64* @var64
%val64 = sitofp i64 %src64 to fp128
store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatditf
ret void
}
define void @test_uitofp() {
; CHECK-LABEL: test_uitofp:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, var32
; CHECK-NEXT: ldr w0, [x8, :lo12:var32]
; CHECK-NEXT: bl __floatunsitf
; CHECK-NEXT: adrp x19, lhs
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: ldr x0, [x8, :lo12:var64]
; CHECK-NEXT: bl __floatunditf
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
%src32 = load i32, i32* @var32
%val32 = uitofp i32 %src32 to fp128
store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatunsitf
%src64 = load i64, i64* @var64
%val64 = uitofp i64 %src64 to fp128
store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatunditf
ret void
}
define i1 @test_setcc1() {
; CHECK-LABEL: test_setcc1:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __letf2
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
%val = fcmp ole fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: cset w0, le
ret i1 %val
; CHECK: ret
}
define i1 @test_setcc2() {
; CHECK-LABEL: test_setcc2:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __letf2
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fcmp ugt fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: cset w0, gt
ret i1 %val
; CHECK: ret
}
define i1 @test_setcc3() {
; CHECK-LABEL: test_setcc3:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48 // =48
; CHECK-NEXT: stp x30, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: stp q1, q0, [sp] // 32-byte Folded Spill
; CHECK-NEXT: bl __eqtf2
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cset w19, eq
; CHECK-NEXT: ldp q1, q0, [sp] // 32-byte Folded Reload
; CHECK-NEXT: bl __unordtf2
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cset w8, ne
; CHECK-NEXT: orr w0, w8, w19
; CHECK-NEXT: ldp x30, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
%val = fcmp ueq fp128 %lhs, %rhs
; CHECK: bl __eqtf2
; CHECK: cmp w0, #0
; CHECK: cset w19, eq
; CHECK: bl __unordtf2
; CHECK: cmp w0, #0
; CHECK: cset w8, ne
; CHECK: orr w0, w8, w19
ret i1 %val
; CHECK: ret
}
define i32 @test_br_cc() {
; CHECK-LABEL: test_br_cc:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: adrp x8, rhs
; CHECK-NEXT: ldr q1, [x8, :lo12:rhs]
; CHECK-NEXT: bl __lttf2
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: b.ge .LBB11_2
; CHECK-NEXT: // %bb.1: // %iftrue
; CHECK-NEXT: mov w0, #42
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB11_2: // %iffalse
; CHECK-NEXT: mov w0, #29
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%lhs = load fp128, fp128* @lhs, align 16
%rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
; olt == !uge, which is what LLVM optimizes this to.
%cond = fcmp olt fp128 %lhs, %rhs
; CHECK: bl __lttf2
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: b.ge {{.LBB[0-9]+_[0-9]+}}
br i1 %cond, label %iftrue, label %iffalse
iftrue:
ret i32 42
; CHECK-NEXT: %bb.
; CHECK-NEXT: mov w0, #42
; CHECK: ret
iffalse:
ret i32 29
; CHECK: mov w0, #29
; CHECK: ret
}
define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
; CHECK-LABEL: test_select:
; CHECK: // %bb.0:
; CHECK-NEXT: tst w0, #0x1
; CHECK-NEXT: b.eq .LBB12_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: str q1, [x8, :lo12:lhs]
; CHECK-NEXT: ret
%val = select i1 %cond, fp128 %lhs, fp128 %rhs
store fp128 %val, fp128* @lhs, align 16
; CHECK: tst w0, #0x1
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: %bb.
; CHECK-NEXT: mov v[[VAL:[0-9]+]].16b, v0.16b
; CHECK-NEXT: [[IFFALSE]]:
; CHECK: str q[[VAL]], [{{x[0-9]+}}, :lo12:lhs]
ret void
; CHECK: ret
}
@varhalf = global half 0.0, align 2
@@ -225,68 +355,97 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
define void @test_round() {
; CHECK-LABEL: test_round:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32 // =32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, lhs
; CHECK-NEXT: ldr q0, [x8, :lo12:lhs]
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: bl __trunctfhf2
; CHECK-NEXT: adrp x8, varhalf
; CHECK-NEXT: str h0, [x8, :lo12:varhalf]
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl __trunctfsf2
; CHECK-NEXT: adrp x8, varfloat
; CHECK-NEXT: str s0, [x8, :lo12:varfloat]
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl __trunctfdf2
; CHECK-NEXT: adrp x8, vardouble
; CHECK-NEXT: str d0, [x8, :lo12:vardouble]
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
%half = fptrunc fp128 %val to half
store half %half, half* @varhalf, align 2
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: bl __trunctfhf2
; CHECK: str h0, [{{x[0-9]+}}, :lo12:varhalf]
%float = fptrunc fp128 %val to float
store float %float, float* @varfloat, align 4
; CHECK: bl __trunctfsf2
; CHECK: str s0, [{{x[0-9]+}}, :lo12:varfloat]
%double = fptrunc fp128 %val to double
store double %double, double* @vardouble, align 8
; CHECK: bl __trunctfdf2
; CHECK: str d0, [{{x[0-9]+}}, :lo12:vardouble]
ret void
}
define void @test_extend() {
; CHECK-LABEL: test_extend:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x19, lhs
; CHECK-NEXT: adrp x8, varhalf
; CHECK-NEXT: ldr h0, [x8, :lo12:varhalf]
; CHECK-NEXT: bl __extendhftf2
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: adrp x8, varfloat
; CHECK-NEXT: ldr s0, [x8, :lo12:varfloat]
; CHECK-NEXT: bl __extendsftf2
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: adrp x8, vardouble
; CHECK-NEXT: ldr d0, [x8, :lo12:vardouble]
; CHECK-NEXT: bl __extenddftf2
; CHECK-NEXT: str q0, [x19, :lo12:lhs]
; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
%val = load fp128, fp128* @lhs, align 16
%half = load half, half* @varhalf
%fromhalf = fpext half %half to fp128
store volatile fp128 %fromhalf, fp128* @lhs, align 16
; CHECK: ldr h0, [{{x[0-9]+}}, :lo12:varhalf]
; CHECK: bl __extendhftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
%float = load float, float* @varfloat
%fromfloat = fpext float %float to fp128
store volatile fp128 %fromfloat, fp128* @lhs, align 16
; CHECK: bl __extendsftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
%double = load double, double* @vardouble
%fromdouble = fpext double %double to fp128
store volatile fp128 %fromdouble, fp128* @lhs, align 16
; CHECK: bl __extenddftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
ret void
; CHECK: ret
}
define fp128 @test_neg(fp128 %in) {
; CHECK-LABEL: test_neg:
; CHECK: // %bb.0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: str q0, [sp, #-16]!
; CHECK-NEXT: ldrb w8, [sp, #15]
; CHECK-NEXT: eor w8, w8, #0x80
; CHECK-NEXT: strb w8, [sp, #15]
; CHECK-NEXT: ldr q0, [sp], #16
; CHECK-NEXT: ret
;; We convert this to fneg, and target-independent code expands it with
;; integer operations.
%ret = fsub fp128 0xL00000000000000008000000000000000, %in
ret fp128 %ret
; CHECK: str q0, [sp, #-16]!
; CHECK-NEXT: ldrb w8, [sp, #15]
; CHECK-NEXT: eor w8, w8, #0x80
; CHECK-NEXT: strb w8, [sp, #15]
; CHECK-NEXT: ldr q0, [sp], #16
; CHECK-NEXT: ret
}
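test_neg above is the one case that avoids a libcall: the fsub from -0.0 is recognized as an fneg, and target-independent code expands it with integer operations, flipping the sign bit via the eor on byte 15 seen in the checks. A rough IR-level equivalent of that expansion (a sketch, not the exact SelectionDAG output; assumes the standard layout with the sign in bit 127):

define fp128 @sketch_fneg(fp128 %in) {
  ; Toggle bit 127, the IEEE binary128 sign bit (the constant is 1 << 127
  ; as an i128 bit pattern).
  %bits = bitcast fp128 %in to i128
  %flip = xor i128 %bits, -170141183460469231731687303715884105728
  %res = bitcast i128 %flip to fp128
  ret fp128 %res
}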


@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 | FileCheck %s --check-prefix=CHECK-P8
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
@@ -15,6 +17,20 @@ define fp128 @fadd_f128(fp128 %f1, fp128 %f2) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fadd_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.fadd.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -27,6 +43,20 @@ define fp128 @fsub_f128(fp128 %f1, fp128 %f2) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: xssubqp v2, v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fsub_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl __subkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.fsub.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -39,6 +69,20 @@ define fp128 @fmul_f128(fp128 %f1, fp128 %f2) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: xsmulqp v2, v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fmul_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl __mulkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.fmul.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -51,6 +95,20 @@ define fp128 @fdiv_f128(fp128 %f1, fp128 %f2) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: xsdivqp v2, v2, v3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fdiv_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl __divkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.fdiv.f128(
fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -64,6 +122,20 @@ define fp128 @fmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
; CHECK-NEXT: xsmaddqp v4, v2, v3
; CHECK-NEXT: vmr v2, v4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fmadd_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl fmal
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.fma.f128(
fp128 %f0, fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -77,6 +149,23 @@ define fp128 @fmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
; CHECK-NEXT: xsmsubqp v4, v2, v3
; CHECK-NEXT: vmr v2, v4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fmsub_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: li r9, 1
; CHECK-P8-NEXT: sldi r9, r9, 63
; CHECK-P8-NEXT: xor r8, r8, r9
; CHECK-P8-NEXT: bl fmal
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%neg = fneg fp128 %f2
%res = call fp128 @llvm.experimental.constrained.fma.f128(
fp128 %f0, fp128 %f1, fp128 %neg,
@@ -91,6 +180,23 @@ define fp128 @fnmadd_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
; CHECK-NEXT: xsnmaddqp v4, v2, v3
; CHECK-NEXT: vmr v2, v4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fnmadd_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl fmal
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: li r5, 1
; CHECK-P8-NEXT: sldi r5, r5, 63
; CHECK-P8-NEXT: xor r4, r4, r5
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%fma = call fp128 @llvm.experimental.constrained.fma.f128(
fp128 %f0, fp128 %f1, fp128 %f2,
metadata !"round.dynamic",
@@ -105,6 +211,27 @@ define fp128 @fnmsub_f128(fp128 %f0, fp128 %f1, fp128 %f2) #0 {
; CHECK-NEXT: xsnmsubqp v4, v2, v3
; CHECK-NEXT: vmr v2, v4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fnmsub_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: li r9, 1
; CHECK-P8-NEXT: sldi r30, r9, 63
; CHECK-P8-NEXT: xor r8, r8, r30
; CHECK-P8-NEXT: bl fmal
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: xor r4, r4, r30
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%neg = fneg fp128 %f2
%fma = call fp128 @llvm.experimental.constrained.fma.f128(
fp128 %f0, fp128 %f1, fp128 %neg,
@@ -120,6 +247,20 @@ define fp128 @fsqrt_f128(fp128 %f1) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: xssqrtqp v2, v2
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: fsqrt_f128:
; CHECK-P8: # %bb.0:
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: bl sqrtl
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
%res = call fp128 @llvm.experimental.constrained.sqrt.f128(
fp128 %f1,
metadata !"round.dynamic",

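The PowerPC test uses the constrained-FP intrinsics, which preserve rounding-mode and FP-exception semantics. With -mcpu=pwr9 each operation selects a native quad-precision instruction (xsaddqp, xsmaddqp, xssqrtqp, ...); the newly added -mcpu=pwr8 run instead lowers to libcalls (__addkf3 and friends; PowerPC names its IEEE binary128 helpers with a kf suffix where AArch64 and x86 use tf). Each test follows this shape, sketched as a standalone example (the strictfp attribute is assumed to match the file's #0):

define fp128 @sketch_constrained_add(fp128 %a, fp128 %b) #0 {
  ; round.dynamic / fpexcept.strict: the operation may not be folded,
  ; reordered, or speculated past FP-environment accesses.
  %r = call fp128 @llvm.experimental.constrained.fadd.f128(
                 fp128 %a, fp128 %b,
                 metadata !"round.dynamic",
                 metadata !"fpexcept.strict")
  ret fp128 %r
}
declare fp128 @llvm.experimental.constrained.fadd.f128(fp128, fp128, metadata, metadata)
attributes #0 = { strictfp }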

@@ -1,3 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O2 -mtriple=x86_64-linux-android -mattr=+mmx \
; RUN: -enable-legalize-types-checking | FileCheck %s
; RUN: llc < %s -O2 -mtriple=x86_64-linux-gnu -mattr=+mmx \
@@ -7,28 +8,34 @@
@my_fp128 = global fp128 0xL00000000000000003FFF000000000000, align 16
define fp128 @get_fp128() {
; CHECK-LABEL: get_fp128:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movaps {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
entry:
%0 = load fp128, fp128* @my_fp128, align 16
ret fp128 %0
; CHECK-LABEL: get_fp128:
; CHECK: movaps my_fp128(%rip), %xmm0
; CHECK-NEXT: retq
}
@TestLoadExtend.data = internal unnamed_addr constant [2 x float] [float 0x3FB99999A0000000, float 0x3FC99999A0000000], align 4
define fp128 @TestLoadExtend(fp128 %x, i32 %n) {
; CHECK-LABEL: TestLoadExtend:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: callq __extendsftf2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds [2 x float], [2 x float]* @TestLoadExtend.data, i64 0, i64 %idxprom
%0 = load float, float* %arrayidx, align 4
%conv = fpext float %0 to fp128
ret fp128 %conv
; CHECK-LABEL: TestLoadExtend:
; CHECK: movslq %edi, %rax
; CHECK-NEXT: movss TestLoadExtend.data(,%rax,4), %xmm0
; CHECK-NEXT: callq __extendsftf2
; CHECK: retq
}
; CHECK-LABEL: my_fp128:
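Finally, the x86-64 test covers a target where fp128 is passed and returned in SSE registers: the constant load becomes a movaps into %xmm0, and the float-to-fp128 extension becomes a call to __extendsftf2. A minimal sketch of the second case (hypothetical standalone IR, not part of the commit):

define fp128 @sketch_extend(float %f) {
  ; Lowers to "callq __extendsftf2" on x86-64, as checked above.
  %r = fpext float %f to fp128
  ret fp128 %r
}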