; llvm-mirror/test/CodeGen/PowerPC/f128-truncateNconv.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -relocation-model=pic -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
; RUN: | FileCheck %s
; RUN: llc -relocation-model=pic -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s \
; RUN: | FileCheck %s -check-prefix=CHECK-P8
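
; Checks truncating conversions from fp128 (IEEE quad precision) to signed and
; unsigned i64/i32/i16/i8. With -mcpu=pwr9, the conversions lower to the VSX
; round-to-zero instructions (xscvqpsdz, xscvqpudz, xscvqpswz, xscvqpuwz)
; followed by a direct move (mfvsrd/mfvsrwz) or an X-form store
; (stxsd/stxsdx/stxsiwx/stxsihx/stxsibx). With -mcpu=pwr8, they lower to
; compiler-rt libcalls (__fixkfdi, __fixunskfdi, __fixkfsi, __fixunskfsi),
; with __addkf3 handling the fp128 additions.
;
; Roughly equivalent C for the first function below (an illustrative sketch,
; not part of the original test):
;   long long qpConv2sdw(__float128 *a) { return (long long)*a; }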
@f128Array = global [4 x fp128] [fp128 0xL00000000000000004004C00000000000,
                                 fp128 0xLF000000000000000400808AB851EB851,
                                 fp128 0xL5000000000000000400E0C26324C8366,
                                 fp128 0xL8000000000000000400A24E2E147AE14],
                                 align 16
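
; Conversions to signed i64: pwr9 uses xscvqpsdz with mfvsrd or stxsd/stxsdx;
; pwr8 calls __fixkfdi.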
; Function Attrs: norecurse nounwind readonly
define i64 @qpConv2sdw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sdw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpsdz v2, v2
; CHECK-NEXT: mfvsrd r3, v2
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sdw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sdw_02(i64* nocapture %res) local_unnamed_addr #1 {
; CHECK-LABEL: qpConv2sdw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpsdz v2, v2
; CHECK-NEXT: stxsd v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sdw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: std r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i64
store i64 %conv, i64* %res, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
define i64 @qpConv2sdw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sdw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpsdz v2, v2
; CHECK-NEXT: mfvsrd r3, v2
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sdw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sdw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2sdw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpsdz v2, v2
; CHECK-NEXT: stxsd v2, 0(r5)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sdw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: std r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i64* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i64
store i64 %conv, i64* %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
define void @qpConv2sdw_testXForm(i64* nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2sdw_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sldi r4, r4, 3
; CHECK-NEXT: ld r5, .LC0@toc@l(r5)
; CHECK-NEXT: lxv v2, 32(r5)
; CHECK-NEXT: xscvqpsdz v2, v2
; CHECK-NEXT: stxsdx v2, r3, r4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sdw_testXForm:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 64
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r29, -24
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -64(r1)
; CHECK-P8-NEXT: mr r30, r4
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r29, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sldi r4, r30, 3
; CHECK-P8-NEXT: stdx r3, r29, r4
; CHECK-P8-NEXT: addi r1, r1, 64
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i64
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds i64, i64* %res, i64 %idxprom
store i64 %conv, i64* %arrayidx, align 8
ret void
}
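
; Conversions to unsigned i64: pwr9 uses xscvqpudz with mfvsrd or stxsd/stxsdx;
; pwr8 calls __fixunskfdi.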
; Function Attrs: norecurse nounwind readonly
define i64 @qpConv2udw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2udw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpudz v2, v2
; CHECK-NEXT: mfvsrd r3, v2
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2udw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixunskfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2udw_02(i64* nocapture %res) {
; CHECK-LABEL: qpConv2udw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpudz v2, v2
; CHECK-NEXT: stxsd v2, 0(r3)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2udw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixunskfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: std r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i64
store i64 %conv, i64* %res, align 8
ret void
}
; Function Attrs: norecurse nounwind readonly
define i64 @qpConv2udw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2udw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpudz v2, v2
; CHECK-NEXT: mfvsrd r3, v2
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2udw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixunskfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i64
ret i64 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2udw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2udw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpudz v2, v2
; CHECK-NEXT: stxsd v2, 0(r5)
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2udw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixunskfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: std r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i64* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i64
store i64 %conv, i64* %res, align 8
ret void
}
; Function Attrs: norecurse nounwind
define void @qpConv2udw_testXForm(i64* nocapture %res, i32 signext %idx) {
; CHECK-LABEL: qpConv2udw_testXForm:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r5, r2, .LC0@toc@ha
; CHECK-NEXT: sldi r4, r4, 3
; CHECK-NEXT: ld r5, .LC0@toc@l(r5)
; CHECK-NEXT: lxvx v2, 0, r5
; CHECK-NEXT: xscvqpudz v2, v2
; CHECK-NEXT: stxsdx v2, r3, r4
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2udw_testXForm:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 64
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r29, -24
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -64(r1)
; CHECK-P8-NEXT: mr r30, r4
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r29, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixunskfdi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sldi r4, r30, 3
; CHECK-P8-NEXT: stdx r3, r29, r4
; CHECK-P8-NEXT: addi r1, r1, 64
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 0), align 16
%conv = fptoui fp128 %0 to i64
%idxprom = sext i32 %idx to i64
%arrayidx = getelementptr inbounds i64, i64* %res, i64 %idxprom
store i64 %conv, i64* %arrayidx, align 8
ret void
}
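
; Conversions to signed i32: pwr9 uses xscvqpswz with mfvsrwz or stxsiwx;
; pwr8 calls __fixkfsi.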
; Function Attrs: norecurse nounwind readonly
define signext i32 @qpConv2sw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sw_02(i32* nocapture %res) {
; CHECK-LABEL: qpConv2sw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsiwx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stw r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i32
store i32 %conv, i32* %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
define signext i32 @qpConv2sw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2sw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsiwx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stw r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i32* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i32
store i32 %conv, i32* %res, align 4
ret void
}
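
; Conversions to unsigned i32: pwr9 uses xscvqpuwz with mfvsrwz or stxsiwx;
; pwr8 calls __fixunskfsi.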
; Function Attrs: norecurse nounwind readonly
define zeroext i32 @qpConv2uw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2uw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixunskfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2uw_02(i32* nocapture %res) {
; CHECK-LABEL: qpConv2uw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsiwx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixunskfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stw r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i32
store i32 %conv, i32* %res, align 4
ret void
}
; Function Attrs: norecurse nounwind readonly
define zeroext i32 @qpConv2uw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2uw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixunskfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i32
ret i32 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2uw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2uw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsiwx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixunskfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stw r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i32* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i32
store i32 %conv, i32* %res, align 4
ret void
}
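
; Conversions to signed i16: pwr9 converts through xscvqpswz and stores with
; stxsihx; pwr8 calls __fixkfsi.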
; Function Attrs: norecurse nounwind readonly
define signext i16 @qpConv2shw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2shw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2shw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2shw_02(i16* nocapture %res) {
; CHECK-LABEL: qpConv2shw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsihx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2shw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sth r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i16
store i16 %conv, i16* %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
define signext i16 @qpConv2shw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2shw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2shw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2shw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2shw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsihx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2shw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sth r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i16* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i16
store i16 %conv, i16* %res, align 2
ret void
}
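
; Conversions to unsigned i16: pwr9 converts through xscvqpswz or xscvqpuwz and
; stores with stxsihx; pwr8 calls __fixkfsi.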
; Function Attrs: norecurse nounwind readonly
define zeroext i16 @qpConv2uhw(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2uhw:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uhw:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2uhw_02(i16* nocapture %res) {
; CHECK-LABEL: qpConv2uhw_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsihx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uhw_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sth r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i16
store i16 %conv, i16* %res, align 2
ret void
}
; Function Attrs: norecurse nounwind readonly
define zeroext i16 @qpConv2uhw_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2uhw_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uhw_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i16
ret i16 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2uhw_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2uhw_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsihx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2uhw_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: sth r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i16* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i16
store i16 %conv, i16* %res, align 2
ret void
}
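
; Conversions to signed i8: pwr9 converts through xscvqpswz and stores with
; stxsibx; pwr8 calls __fixkfsi.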
; Function Attrs: norecurse nounwind readonly
define signext i8 @qpConv2sb(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sb:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sb:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptosi fp128 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sb_02(i8* nocapture %res) {
; CHECK-LABEL: qpConv2sb_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsibx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sb_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stb r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptosi fp128 %0 to i8
store i8 %conv, i8* %res, align 1
ret void
}
; Function Attrs: norecurse nounwind readonly
define signext i8 @qpConv2sb_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2sb_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sb_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: extsw r3, r3
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2sb_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2sb_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: stxsibx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2sb_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stb r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i8* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptosi fp128 %add to i8
store i8 %conv, i8* %res, align 1
ret void
}
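
; Conversions to unsigned i8: pwr9 converts through xscvqpswz or xscvqpuwz and
; stores with stxsibx; pwr8 calls __fixkfsi.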
; Function Attrs: norecurse nounwind readonly
define zeroext i8 @qpConv2ub(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2ub:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2ub:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%conv = fptoui fp128 %0 to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2ub_02(i8* nocapture %res) {
; CHECK-LABEL: qpConv2ub_02:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-NEXT: lxv v2, 32(r4)
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsibx v2, 0, r3
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2ub_02:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: mr r30, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 32
; CHECK-P8-NEXT: lvx v2, 0, r4
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stb r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 2), align 16
%conv = fptoui fp128 %0 to i8
store i8 %conv, i8* %res, align 1
ret void
}
; Function Attrs: norecurse nounwind readonly
define zeroext i8 @qpConv2ub_03(fp128* nocapture readonly %a) {
; CHECK-LABEL: qpConv2ub_03:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: addis r3, r2, .LC0@toc@ha
; CHECK-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-NEXT: lxv v3, 16(r3)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpswz v2, v2
; CHECK-NEXT: mfvsrwz r3, v2
; CHECK-NEXT: clrldi r3, r3, 32
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2ub_03:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -32(r1)
; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: addis r4, r2, .LC0@toc@ha
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: ld r4, .LC0@toc@l(r4)
; CHECK-P8-NEXT: addi r4, r4, 16
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: addi r1, r1, 32
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* getelementptr inbounds ([4 x fp128], [4 x fp128]* @f128Array, i64 0, i64 1), align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i8
ret i8 %conv
}
; Function Attrs: norecurse nounwind
define void @qpConv2ub_04(fp128* nocapture readonly %a,
; CHECK-LABEL: qpConv2ub_04:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxv v2, 0(r3)
; CHECK-NEXT: lxv v3, 0(r4)
; CHECK-NEXT: xsaddqp v2, v2, v3
; CHECK-NEXT: xscvqpuwz v2, v2
; CHECK-NEXT: stxsibx v2, 0, r5
; CHECK-NEXT: blr
;
; CHECK-P8-LABEL: qpConv2ub_04:
; CHECK-P8: # %bb.0: # %entry
; CHECK-P8-NEXT: mflr r0
; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
; CHECK-P8-NEXT: .cfi_offset lr, 16
; CHECK-P8-NEXT: .cfi_offset r30, -16
; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; CHECK-P8-NEXT: std r0, 16(r1)
; CHECK-P8-NEXT: stdu r1, -48(r1)
; CHECK-P8-NEXT: lvx v2, 0, r3
; CHECK-P8-NEXT: lvx v3, 0, r4
; CHECK-P8-NEXT: mr r30, r5
; CHECK-P8-NEXT: bl __addkf3
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: bl __fixkfsi
; CHECK-P8-NEXT: nop
; CHECK-P8-NEXT: stb r3, 0(r30)
; CHECK-P8-NEXT: addi r1, r1, 48
; CHECK-P8-NEXT: ld r0, 16(r1)
; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
; CHECK-P8-NEXT: mtlr r0
; CHECK-P8-NEXT: blr
fp128* nocapture readonly %b, i8* nocapture %res) {
entry:
%0 = load fp128, fp128* %a, align 16
%1 = load fp128, fp128* %b, align 16
%add = fadd fp128 %0, %1
%conv = fptoui fp128 %add to i8
store i8 %conv, i8* %res, align 1
ret void
}