From e2a03586ce5ea28cdbf368f3a45876e7dfe0c5b1 Mon Sep 17 00:00:00 2001
From: Qiu Chaofan
Date: Mon, 13 Jul 2020 12:15:44 +0800
Subject: [PATCH] [PowerPC] Support constrained conversion in SPE target

This patch adds support for constrained int/fp conversion between
signed/unsigned i32 and f32/f64.

Reviewed By: jhibbits

Differential Revision: https://reviews.llvm.org/D82747
---
 lib/Target/PowerPC/PPCISelLowering.cpp |   8 +-
 lib/Target/PowerPC/PPCInstrSPE.td      |  16 +-
 test/CodeGen/PowerPC/fp-strict-conv.ll | 274 +++++++++++++++++++++++++
 3 files changed, 288 insertions(+), 10 deletions(-)
 create mode 100644 test/CodeGen/PowerPC/fp-strict-conv.ll

diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 49140bab513..575ad68fecd 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -423,6 +423,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
 
   if (Subtarget.hasSPE()) {
     // SPE has built-in conversions
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
+    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
+    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
@@ -572,9 +575,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
   } else {
     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
-    if (Subtarget.hasSPE())
+    if (Subtarget.hasSPE()) {
+      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
-    else
+    } else
       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
   }
 
diff --git a/lib/Target/PowerPC/PPCInstrSPE.td b/lib/Target/PowerPC/PPCInstrSPE.td
index 935c3044ae4..858eb0c9fe5 100644
--- a/lib/Target/PowerPC/PPCInstrSPE.td
+++ b/lib/Target/PowerPC/PPCInstrSPE.td
@@ -158,7 +158,7 @@ def EFDCFSF : EFXForm_2a<755, (outs sperc:$RT), (ins spe4rc:$RB),
 
 def EFDCFSI : EFXForm_2a<753, (outs sperc:$RT), (ins gprc:$RB),
                     "efdcfsi $RT, $RB", IIC_FPDGeneral,
-                    [(set f64:$RT, (sint_to_fp i32:$RB))]>;
+                    [(set f64:$RT, (any_sint_to_fp i32:$RB))]>;
 
 def EFDCFSID : EFXForm_2a<739, (outs sperc:$RT), (ins gprc:$RB),
                     "efdcfsid $RT, $RB", IIC_FPDGeneral,
@@ -169,7 +169,7 @@ def EFDCFUF : EFXForm_2a<754, (outs sperc:$RT), (ins spe4rc:$RB),
 
 def EFDCFUI : EFXForm_2a<752, (outs sperc:$RT), (ins gprc:$RB),
                     "efdcfui $RT, $RB", IIC_FPDGeneral,
-                    [(set f64:$RT, (uint_to_fp i32:$RB))]>;
+                    [(set f64:$RT, (any_uint_to_fp i32:$RB))]>;
 
 def EFDCFUID : EFXForm_2a<738, (outs sperc:$RT), (ins gprc:$RB),
                     "efdcfuid $RT, $RB", IIC_FPDGeneral,
@@ -197,7 +197,7 @@ def EFDCTSIDZ : EFXForm_2a<747, (outs gprc:$RT), (ins sperc:$RB),
 
 def EFDCTSIZ : EFXForm_2a<762, (outs gprc:$RT), (ins sperc:$RB),
                     "efdctsiz $RT, $RB", IIC_FPDGeneral,
-                    [(set i32:$RT, (fp_to_sint f64:$RB))]>;
+                    [(set i32:$RT, (any_fp_to_sint f64:$RB))]>;
 
 def EFDCTUF : EFXForm_2a<758, (outs sperc:$RT), (ins spe4rc:$RB),
                     "efdctuf $RT, $RB", IIC_FPDGeneral, []>;
@@ -212,7 +212,7 @@ def EFDCTUIDZ : EFXForm_2a<746, (outs gprc:$RT), (ins sperc:$RB),
 
 def EFDCTUIZ : EFXForm_2a<760, (outs gprc:$RT), (ins sperc:$RB),
                     "efdctuiz $RT, $RB", IIC_FPDGeneral,
-                    [(set i32:$RT, (fp_to_uint f64:$RB))]>;
+                    [(set i32:$RT, (any_fp_to_uint f64:$RB))]>;
 
 def EFDDIV : EFXForm_1<745, (outs sperc:$RT), (ins sperc:$RA, sperc:$RB),
                     "efddiv $RT, $RA, $RB", IIC_FPDivD,
@@ -261,14 +261,14 @@ def EFSCFSF : EFXForm_2a<723, (outs spe4rc:$RT), (ins spe4rc:$RB),
 
 def EFSCFSI : EFXForm_2a<721, (outs spe4rc:$RT), (ins gprc:$RB),
                     "efscfsi $RT, $RB", IIC_FPSGeneral,
-                    [(set f32:$RT, (sint_to_fp i32:$RB))]>;
+                    [(set f32:$RT, (any_sint_to_fp i32:$RB))]>;
 
 def EFSCFUF : EFXForm_2a<722, (outs spe4rc:$RT), (ins spe4rc:$RB),
                     "efscfuf $RT, $RB", IIC_FPSGeneral, []>;
 
 def EFSCFUI : EFXForm_2a<720, (outs spe4rc:$RT), (ins gprc:$RB),
                     "efscfui $RT, $RB", IIC_FPSGeneral,
-                    [(set f32:$RT, (uint_to_fp i32:$RB))]>;
+                    [(set f32:$RT, (any_uint_to_fp i32:$RB))]>;
 
 let isCompare = 1 in {
 def EFSCMPEQ : EFXForm_3<718, (outs crrc:$crD), (ins spe4rc:$RA, spe4rc:$RB),
@@ -288,7 +288,7 @@ def EFSCTSI : EFXForm_2a<725, (outs gprc:$RT), (ins spe4rc:$RB),
 
 def EFSCTSIZ : EFXForm_2a<730, (outs gprc:$RT), (ins spe4rc:$RB),
                     "efsctsiz $RT, $RB", IIC_FPSGeneral,
-                    [(set i32:$RT, (fp_to_sint f32:$RB))]>;
+                    [(set i32:$RT, (any_fp_to_sint f32:$RB))]>;
 
 def EFSCTUF : EFXForm_2a<726, (outs sperc:$RT), (ins spe4rc:$RB),
                     "efsctuf $RT, $RB", IIC_FPSGeneral, []>;
@@ -299,7 +299,7 @@ def EFSCTUI : EFXForm_2a<724, (outs gprc:$RT), (ins spe4rc:$RB),
 
 def EFSCTUIZ : EFXForm_2a<728, (outs gprc:$RT), (ins spe4rc:$RB),
                     "efsctuiz $RT, $RB", IIC_FPSGeneral,
-                    [(set i32:$RT, (fp_to_uint f32:$RB))]>;
+                    [(set i32:$RT, (any_fp_to_uint f32:$RB))]>;
 
 def EFSDIV : EFXForm_1<713, (outs spe4rc:$RT), (ins spe4rc:$RA, spe4rc:$RB),
                     "efsdiv $RT, $RA, $RB", IIC_FPDivD,
diff --git a/test/CodeGen/PowerPC/fp-strict-conv.ll b/test/CodeGen/PowerPC/fp-strict-conv.ll
new file mode 100644
index 00000000000..ab806a19c15
--- /dev/null
+++ b/test/CodeGen/PowerPC/fp-strict-conv.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mcpu=e500 \
+; RUN:   -mtriple=powerpc-unknown-linux-gnu -mattr=spe | FileCheck %s \
+; RUN:   -check-prefix=SPE
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
+
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
+
+declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
+
+define i32 @d_to_i32(double %m) #0 {
+; SPE-LABEL: d_to_i32:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    evmergelo r3, r3, r4
+; SPE-NEXT:    efdctsiz r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @d_to_i64(double %m) #0 {
+; SPE-LABEL: d_to_i64:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    evmergelo r4, r3, r4
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    bl __fixdfdi
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @d_to_u64(double %m) #0 {
+; SPE-LABEL: d_to_u64:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    evmergelo r4, r3, r4
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    bl __fixunsdfdi
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @d_to_u32(double %m) #0 {
+; SPE-LABEL: d_to_u32:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    evmergelo r3, r3, r4
+; SPE-NEXT:    efdctuiz r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define signext i32 @f_to_i32(float %m) #0 {
+; SPE-LABEL: f_to_i32:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efsctsiz r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @f_to_i64(float %m) #0 {
+; SPE-LABEL: f_to_i64:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __fixsfdi
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @f_to_u64(float %m) #0 {
+; SPE-LABEL: f_to_u64:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __fixunssfdi
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @f_to_u32(float %m) #0 {
+; SPE-LABEL: f_to_u32:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efsctuiz r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define double @i32_to_d(i32 signext %m) #0 {
+; SPE-LABEL: i32_to_d:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efdcfsi r4, r3
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @i64_to_d(i64 %m) #0 {
+; SPE-LABEL: i64_to_d:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __floatdidf
+; SPE-NEXT:    evmergelo r4, r3, r4
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @u32_to_d(i32 zeroext %m) #0 {
+; SPE-LABEL: u32_to_d:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efdcfui r4, r3
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define double @u64_to_d(i64 %m) #0 {
+; SPE-LABEL: u64_to_d:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __floatundidf
+; SPE-NEXT:    evmergelo r4, r3, r4
+; SPE-NEXT:    evmergehi r3, r4, r4
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
+; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret double %conv
+}
+
+define float @i32_to_f(i32 signext %m) #0 {
+; SPE-LABEL: i32_to_f:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efscfsi r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @i64_to_f(i64 %m) #0 {
+; SPE-LABEL: i64_to_f:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __floatdisf
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @u32_to_f(i32 zeroext %m) #0 {
+; SPE-LABEL: u32_to_f:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    efscfui r3, r3
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+define float @u64_to_f(i64 %m) #0 {
+; SPE-LABEL: u64_to_f:
+; SPE:       # %bb.0: # %entry
+; SPE-NEXT:    mflr r0
+; SPE-NEXT:    stw r0, 4(r1)
+; SPE-NEXT:    stwu r1, -16(r1)
+; SPE-NEXT:    .cfi_def_cfa_offset 16
+; SPE-NEXT:    .cfi_offset lr, 4
+; SPE-NEXT:    bl __floatundisf
+; SPE-NEXT:    lwz r0, 20(r1)
+; SPE-NEXT:    addi r1, r1, 16
+; SPE-NEXT:    mtlr r0
+; SPE-NEXT:    blr
+entry:
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+  ret float %conv
+}
+
+attributes #0 = { strictfp }
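
A minimal usage sketch, mirroring the d_to_i32 test added above (the function
name @example is only a placeholder, and this assumes an llc build that
includes the PowerPC target): compiling the following module with

  llc -mtriple=powerpc-unknown-linux-gnu -mcpu=e500 -mattr=spe

should now select efdctsiz directly for the strict conversion, as the SPE
check lines above verify.

  ; strict f64 -> i32 conversion; the strictfp attribute is required for
  ; constrained intrinsics, matching the test file's attributes #0
  declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)

  define i32 @example(double %x) #0 {
  entry:
    %r = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x, metadata !"fpexcept.strict") #0
    ret i32 %r
  }

  attributes #0 = { strictfp }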