mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 12:12:47 +01:00
[amdgpu] Improve the conversion from f32 to i64.
- Take the same principle as the conversion from f64 to i64, with the extra necessary pre- and post-processing. It helps to reduce that conversion sequence by half compared to the legacy one. Reviewed By: foad Differential Revision: https://reviews.llvm.org/D104427
This commit is contained in:
parent
649e66f4eb
commit
be5f17eb4b
@ -2591,33 +2591,77 @@ SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
|
|||||||
return LowerINT_TO_FP64(Op, DAG, true);
|
return LowerINT_TO_FP64(Op, DAG, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
|
SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
|
||||||
bool Signed) const {
|
bool Signed) const {
|
||||||
SDLoc SL(Op);
|
SDLoc SL(Op);
|
||||||
|
|
||||||
SDValue Src = Op.getOperand(0);
|
SDValue Src = Op.getOperand(0);
|
||||||
|
EVT SrcVT = Src.getValueType();
|
||||||
|
|
||||||
SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
|
assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
|
||||||
|
|
||||||
SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
|
// The basic idea of converting a floating point number into a pair of 32-bit
|
||||||
MVT::f64);
|
// integers is illustrated as follows:
|
||||||
SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
|
//
|
||||||
MVT::f64);
|
// tf := trunc(val);
|
||||||
|
// hif := floor(tf * 2^-32);
|
||||||
|
// lof := tf - hif * 2^32; // lof is always positive due to floor.
|
||||||
|
// hi := fptoi(hif);
|
||||||
|
// lo := fptoi(lof);
|
||||||
|
//
|
||||||
|
SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
|
||||||
|
SDValue Sign;
|
||||||
|
if (Signed && SrcVT == MVT::f32) {
|
||||||
|
// However, a 32-bit floating point number has only 23 bits mantissa and
|
||||||
|
// it's not enough to hold all the significant bits of `lof` if val is
|
||||||
|
// negative. To avoid the loss of precision, We need to take the absolute
|
||||||
|
// value after truncating and flip the result back based on the original
|
||||||
|
// signedness.
|
||||||
|
Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
|
||||||
|
DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
|
||||||
|
DAG.getConstant(31, SL, MVT::i32));
|
||||||
|
Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
|
||||||
|
}
|
||||||
|
|
||||||
|
SDValue K0, K1;
|
||||||
|
if (SrcVT == MVT::f64) {
|
||||||
|
K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*2^-32*/ 0x3df0000000000000)),
|
||||||
|
SL, SrcVT);
|
||||||
|
K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(/*-2^32*/ 0xc1f0000000000000)),
|
||||||
|
SL, SrcVT);
|
||||||
|
} else {
|
||||||
|
K0 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*2^-32*/ 0x2f800000)), SL,
|
||||||
|
SrcVT);
|
||||||
|
K1 = DAG.getConstantFP(BitsToFloat(UINT32_C(/*-2^32*/ 0xcf800000)), SL,
|
||||||
|
SrcVT);
|
||||||
|
}
|
||||||
// TODO: Should this propagate fast-math-flags?
|
// TODO: Should this propagate fast-math-flags?
|
||||||
SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
|
SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
|
||||||
|
|
||||||
SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
|
SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
|
||||||
|
|
||||||
|
SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
|
||||||
|
|
||||||
SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
|
SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
|
||||||
|
: ISD::FP_TO_UINT,
|
||||||
SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
|
SL, MVT::i32, FloorMul);
|
||||||
MVT::i32, FloorMul);
|
|
||||||
SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
|
SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
|
||||||
|
|
||||||
SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
|
SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
|
||||||
|
DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
|
||||||
|
|
||||||
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
|
if (Signed && SrcVT == MVT::f32) {
|
||||||
|
assert(Sign);
|
||||||
|
// Flip the result based on the signedness, which is either all 0s or 1s.
|
||||||
|
Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
|
||||||
|
DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
|
||||||
|
// r := xor(r, sign) - sign;
|
||||||
|
Result =
|
||||||
|
DAG.getNode(ISD::SUB, SL, MVT::i64,
|
||||||
|
DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Result;
|
||||||
}
|
}
|
||||||
|
|
||||||
SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
|
SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
|
||||||
@ -2748,8 +2792,8 @@ SDValue AMDGPUTargetLowering::LowerFP_TO_INT(SDValue Op,
|
|||||||
return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
|
return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (DestVT == MVT::i64 && SrcVT == MVT::f64)
|
if (DestVT == MVT::i64 && (SrcVT == MVT::f32 || SrcVT == MVT::f64))
|
||||||
return LowerFP64_TO_INT(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
|
return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
|
||||||
|
|
||||||
return SDValue();
|
return SDValue();
|
||||||
}
|
}
|
||||||
|
@ -64,7 +64,7 @@ protected:
|
|||||||
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
|
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
|
||||||
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
|
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
|
||||||
|
|
||||||
SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
|
SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
|
||||||
SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
|
SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
|
||||||
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
|
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
|
||||||
|
|
||||||
|
@ -820,7 +820,7 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
|
|||||||
|
|
||||||
auto &FPToI = getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
|
auto &FPToI = getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
|
||||||
.legalFor({{S32, S32}, {S32, S64}, {S32, S16}})
|
.legalFor({{S32, S32}, {S32, S64}, {S32, S16}})
|
||||||
.customFor({{S64, S64}})
|
.customFor({{S64, S32}, {S64, S64}})
|
||||||
.narrowScalarFor({{S64, S16}}, changeTo(0, S32));
|
.narrowScalarFor({{S64, S16}}, changeTo(0, S32));
|
||||||
if (ST.has16BitInsts())
|
if (ST.has16BitInsts())
|
||||||
FPToI.legalFor({{S16, S16}});
|
FPToI.legalFor({{S16, S16}});
|
||||||
@ -2070,9 +2070,10 @@ bool AMDGPULegalizerInfo::legalizeITOFP(
|
|||||||
|
|
||||||
// TODO: Copied from DAG implementation. Verify logic and document how this
|
// TODO: Copied from DAG implementation. Verify logic and document how this
|
||||||
// actually works.
|
// actually works.
|
||||||
bool AMDGPULegalizerInfo::legalizeFPTOI(
|
bool AMDGPULegalizerInfo::legalizeFPTOI(MachineInstr &MI,
|
||||||
MachineInstr &MI, MachineRegisterInfo &MRI,
|
MachineRegisterInfo &MRI,
|
||||||
MachineIRBuilder &B, bool Signed) const {
|
MachineIRBuilder &B,
|
||||||
|
bool Signed) const {
|
||||||
|
|
||||||
Register Dst = MI.getOperand(0).getReg();
|
Register Dst = MI.getOperand(0).getReg();
|
||||||
Register Src = MI.getOperand(1).getReg();
|
Register Src = MI.getOperand(1).getReg();
|
||||||
@ -2080,24 +2081,59 @@ bool AMDGPULegalizerInfo::legalizeFPTOI(
|
|||||||
const LLT S64 = LLT::scalar(64);
|
const LLT S64 = LLT::scalar(64);
|
||||||
const LLT S32 = LLT::scalar(32);
|
const LLT S32 = LLT::scalar(32);
|
||||||
|
|
||||||
assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
|
const LLT SrcLT = MRI.getType(Src);
|
||||||
|
const LLT DstLT = MRI.getType(Dst);
|
||||||
|
|
||||||
|
assert((SrcLT == S32 || SrcLT == S64) && DstLT == S64);
|
||||||
|
|
||||||
unsigned Flags = MI.getFlags();
|
unsigned Flags = MI.getFlags();
|
||||||
|
|
||||||
auto Trunc = B.buildIntrinsicTrunc(S64, Src, Flags);
|
// The basic idea of converting a floating point number into a pair of 32-bit
|
||||||
auto K0 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0x3df0000000000000)));
|
// integers is illustrated as follows:
|
||||||
auto K1 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0xc1f0000000000000)));
|
//
|
||||||
|
// tf := trunc(val);
|
||||||
|
// hif := floor(tf * 2^-32);
|
||||||
|
// lof := tf - hif * 2^32; // lof is always positive due to floor.
|
||||||
|
// hi := fptoi(hif);
|
||||||
|
// lo := fptoi(lof);
|
||||||
|
//
|
||||||
|
auto Trunc = B.buildIntrinsicTrunc(SrcLT, Src, Flags);
|
||||||
|
MachineInstrBuilder Sign;
|
||||||
|
if (Signed && SrcLT == S32) {
|
||||||
|
// However, a 32-bit floating point number has only 23 bits mantissa and
|
||||||
|
// it's not enough to hold all the significant bits of `lof` if val is
|
||||||
|
// negative. To avoid the loss of precision, We need to take the absolute
|
||||||
|
// value after truncating and flip the result back based on the original
|
||||||
|
// signedness.
|
||||||
|
Sign = B.buildAShr(S32, Src, B.buildConstant(S32, 31));
|
||||||
|
Trunc = B.buildFAbs(S32, Trunc, Flags);
|
||||||
|
}
|
||||||
|
MachineInstrBuilder K0, K1;
|
||||||
|
if (SrcLT == S64) {
|
||||||
|
K0 = B.buildFConstant(S64,
|
||||||
|
BitsToDouble(UINT64_C(/*2^-32*/ 0x3df0000000000000)));
|
||||||
|
K1 = B.buildFConstant(S64,
|
||||||
|
BitsToDouble(UINT64_C(/*-2^32*/ 0xc1f0000000000000)));
|
||||||
|
} else {
|
||||||
|
K0 = B.buildFConstant(S32, BitsToFloat(UINT32_C(/*2^-32*/ 0x2f800000)));
|
||||||
|
K1 = B.buildFConstant(S32, BitsToFloat(UINT32_C(/*-2^32*/ 0xcf800000)));
|
||||||
|
}
|
||||||
|
|
||||||
auto Mul = B.buildFMul(S64, Trunc, K0, Flags);
|
auto Mul = B.buildFMul(SrcLT, Trunc, K0, Flags);
|
||||||
auto FloorMul = B.buildFFloor(S64, Mul, Flags);
|
auto FloorMul = B.buildFFloor(SrcLT, Mul, Flags);
|
||||||
auto Fma = B.buildFMA(S64, FloorMul, K1, Trunc, Flags);
|
auto Fma = B.buildFMA(SrcLT, FloorMul, K1, Trunc, Flags);
|
||||||
|
|
||||||
auto Hi = Signed ?
|
auto Hi = (Signed && SrcLT == S64) ? B.buildFPTOSI(S32, FloorMul)
|
||||||
B.buildFPTOSI(S32, FloorMul) :
|
: B.buildFPTOUI(S32, FloorMul);
|
||||||
B.buildFPTOUI(S32, FloorMul);
|
|
||||||
auto Lo = B.buildFPTOUI(S32, Fma);
|
auto Lo = B.buildFPTOUI(S32, Fma);
|
||||||
|
|
||||||
B.buildMerge(Dst, { Lo, Hi });
|
if (Signed && SrcLT == S32) {
|
||||||
|
// Flip the result based on the signedness, which is either all 0s or 1s.
|
||||||
|
Sign = B.buildMerge(S64, {Sign, Sign});
|
||||||
|
// r := xor({lo, hi}, sign) - sign;
|
||||||
|
B.buildSub(Dst, B.buildXor(S64, B.buildMerge(S64, {Lo, Hi}), Sign), Sign);
|
||||||
|
} else
|
||||||
|
B.buildMerge(Dst, {Lo, Hi});
|
||||||
MI.eraseFromParent();
|
MI.eraseFromParent();
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
@ -380,74 +380,46 @@ body: |
|
|||||||
|
|
||||||
; SI-LABEL: name: test_fptosi_s32_to_s64
|
; SI-LABEL: name: test_fptosi_s32_to_s64
|
||||||
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; SI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[ASHR]]
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[ASHR]], [[USUBO1]]
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; SI: $vgpr0_vgpr1 = COPY [[MV2]](s64)
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; SI: $vgpr0_vgpr1 = COPY [[SELECT1]](s64)
|
|
||||||
; VI-LABEL: name: test_fptosi_s32_to_s64
|
; VI-LABEL: name: test_fptosi_s32_to_s64
|
||||||
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; VI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[ASHR]]
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[ASHR]], [[USUBO1]]
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; VI: $vgpr0_vgpr1 = COPY [[MV2]](s64)
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; VI: $vgpr0_vgpr1 = COPY [[SELECT1]](s64)
|
|
||||||
%0:_(s32) = COPY $vgpr0
|
%0:_(s32) = COPY $vgpr0
|
||||||
%1:_(s64) = G_FPTOSI %0
|
%1:_(s64) = G_FPTOSI %0
|
||||||
$vgpr0_vgpr1 = COPY %1
|
$vgpr0_vgpr1 = COPY %1
|
||||||
@ -462,122 +434,78 @@ body: |
|
|||||||
; SI-LABEL: name: test_fptosi_v2s32_to_v2s64
|
; SI-LABEL: name: test_fptosi_v2s32_to_v2s64
|
||||||
; SI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
; SI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
|
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV]], [[C]](s32)
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
|
; SI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C4]]
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[ASHR]]
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[UV4]]
|
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[ASHR]], [[USUBO1]]
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV5]], [[USUBO1]]
|
; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
; SI: [[FABS1:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC1]]
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
; SI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FABS1]], [[C1]]
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
; SI: [[FFLOOR1:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL1]]
|
||||||
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
|
; SI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR1]], [[C2]], [[FABS1]]
|
||||||
; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
; SI: [[FPTOUI2:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR1]](s32)
|
||||||
; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
|
; SI: [[FPTOUI3:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s32)
|
||||||
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
; SI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR1]](s32), [[ASHR1]](s32)
|
||||||
; SI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
; SI: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI3]](s32), [[FPTOUI2]](s32)
|
||||||
; SI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C4]]
|
; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV4]], [[MV3]]
|
||||||
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
||||||
; SI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
; SI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[ASHR1]]
|
||||||
; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
; SI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[ASHR1]], [[USUBO3]]
|
||||||
; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
; SI: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
||||||
; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV5]](s64)
|
||||||
; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; SI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]]
|
|
||||||
; SI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO3]]
|
|
||||||
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64)
|
|
||||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
||||||
; VI-LABEL: name: test_fptosi_v2s32_to_v2s64
|
; VI-LABEL: name: test_fptosi_v2s32_to_v2s64
|
||||||
; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
|
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV]], [[C]](s32)
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
|
; VI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C4]]
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[ASHR]]
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[UV4]]
|
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[ASHR]], [[USUBO1]]
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV5]], [[USUBO1]]
|
; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; VI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
; VI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
; VI: [[FABS1:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC1]]
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
; VI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FABS1]], [[C1]]
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
; VI: [[FFLOOR1:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL1]]
|
||||||
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
|
; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR1]], [[C2]], [[FABS1]]
|
||||||
; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
; VI: [[FPTOUI2:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR1]](s32)
|
||||||
; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
|
; VI: [[FPTOUI3:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s32)
|
||||||
; VI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
; VI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR1]](s32), [[ASHR1]](s32)
|
||||||
; VI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
; VI: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI3]](s32), [[FPTOUI2]](s32)
|
||||||
; VI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C4]]
|
; VI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[MV4]], [[MV3]]
|
||||||
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
||||||
; VI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
; VI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[ASHR1]]
|
||||||
; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
; VI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[ASHR1]], [[USUBO3]]
|
||||||
; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
; VI: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
||||||
; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV5]](s64)
|
||||||
; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; VI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; VI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; VI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]]
|
|
||||||
; VI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO3]]
|
|
||||||
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64)
|
|
||||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
||||||
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
%1:_(<2 x s64>) = G_FPTOSI %0
|
%1:_(<2 x s64>) = G_FPTOSI %0
|
||||||
@ -729,75 +657,47 @@ body: |
|
|||||||
|
|
||||||
; SI-LABEL: name: test_fptosi_s32_to_s33
|
; SI-LABEL: name: test_fptosi_s32_to_s33
|
||||||
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; SI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[ASHR]]
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[ASHR]], [[USUBO1]]
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[SELECT1]](s64)
|
|
||||||
; SI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
; SI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
||||||
; VI-LABEL: name: test_fptosi_s32_to_s33
|
; VI-LABEL: name: test_fptosi_s32_to_s33
|
||||||
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; VI: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FABS]], [[C1]]
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C2]], [[FABS]]
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[MV1]], [[MV]]
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[ASHR]]
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[ASHR]], [[USUBO1]]
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV2]](s64)
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[SELECT1]](s64)
|
|
||||||
; VI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
; VI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
||||||
%0:_(s32) = COPY $vgpr0
|
%0:_(s32) = COPY $vgpr0
|
||||||
%1:_(s33) = G_FPTOSI %0
|
%1:_(s33) = G_FPTOSI %0
|
||||||
|
@ -380,132 +380,28 @@ body: |
|
|||||||
|
|
||||||
; SI-LABEL: name: test_fptoui_s32_to_s64
|
; SI-LABEL: name: test_fptoui_s32_to_s64
|
||||||
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: $vgpr0_vgpr1 = COPY [[MV]](s64)
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; SI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; SI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C9]]
|
|
||||||
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; SI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; SI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; SI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; SI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
|
|
||||||
; SI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO3]]
|
|
||||||
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; SI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[COPY]](s32), [[C9]]
|
|
||||||
; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; SI: $vgpr0_vgpr1 = COPY [[SELECT4]](s64)
|
|
||||||
; VI-LABEL: name: test_fptoui_s32_to_s64
|
; VI-LABEL: name: test_fptoui_s32_to_s64
|
||||||
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: $vgpr0_vgpr1 = COPY [[MV]](s64)
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; VI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; VI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C9]]
|
|
||||||
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; VI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; VI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; VI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; VI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; VI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; VI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
|
|
||||||
; VI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO3]]
|
|
||||||
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; VI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; VI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[COPY]](s32), [[C9]]
|
|
||||||
; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; VI: $vgpr0_vgpr1 = COPY [[SELECT4]](s64)
|
|
||||||
%0:_(s32) = COPY $vgpr0
|
%0:_(s32) = COPY $vgpr0
|
||||||
%1:_(s64) = G_FPTOUI %0
|
%1:_(s64) = G_FPTOUI %0
|
||||||
$vgpr0_vgpr1 = COPY %1
|
$vgpr0_vgpr1 = COPY %1
|
||||||
@ -520,234 +416,44 @@ body: |
|
|||||||
; SI-LABEL: name: test_fptoui_v2s32_to_v2s64
|
; SI-LABEL: name: test_fptoui_v2s32_to_v2s64
|
||||||
; SI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
; SI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C4]]
|
; SI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC1]], [[C]]
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; SI: [[FFLOOR1:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL1]]
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; SI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR1]], [[C1]], [[INTRINSIC_TRUNC1]]
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; SI: [[FPTOUI2:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR1]](s32)
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
; SI: [[FPTOUI3:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s32)
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI3]](s32), [[FPTOUI2]](s32)
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[UV4]]
|
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV5]], [[USUBO1]]
|
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; SI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; SI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[C9]]
|
|
||||||
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; SI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; SI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; SI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; SI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; SI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]]
|
|
||||||
; SI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO3]]
|
|
||||||
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; SI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[UV]](s32), [[C9]]
|
|
||||||
; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; SI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
|
|
||||||
; SI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[C1]](s32)
|
|
||||||
; SI: [[AND7:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
|
|
||||||
; SI: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[AND7]], [[C3]](s32)
|
|
||||||
; SI: [[SEXT2:%[0-9]+]]:_(s64) = G_SEXT [[ASHR2]](s32)
|
|
||||||
; SI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C4]]
|
|
||||||
; SI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[C5]]
|
|
||||||
; SI: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
|
|
||||||
; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[LSHR4]], [[C6]]
|
|
||||||
; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[SUB6]], [[C1]]
|
|
||||||
; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB6]]
|
|
||||||
; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ZEXT2]], [[SUB7]](s32)
|
|
||||||
; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT2]], [[SUB8]](s32)
|
|
||||||
; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB6]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL2]], [[LSHR5]]
|
|
||||||
; SI: [[XOR3:%[0-9]+]]:_(s64) = G_XOR [[SELECT5]], [[SEXT2]]
|
|
||||||
; SI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR3]](s64)
|
|
||||||
; SI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT2]](s64)
|
|
||||||
; SI: [[USUBO4:%[0-9]+]]:_(s32), [[USUBO5:%[0-9]+]]:_(s1) = G_USUBO [[UV10]], [[UV12]]
|
|
||||||
; SI: [[USUBE4:%[0-9]+]]:_(s32), [[USUBE5:%[0-9]+]]:_(s1) = G_USUBE [[UV11]], [[UV13]], [[USUBO5]]
|
|
||||||
; SI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO4]](s32), [[USUBE4]](s32)
|
|
||||||
; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB6]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[C8]], [[MV2]]
|
|
||||||
; SI: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[C9]]
|
|
||||||
; SI: [[AND9:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C]]
|
|
||||||
; SI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[AND9]], [[C1]](s32)
|
|
||||||
; SI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C2]]
|
|
||||||
; SI: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[AND10]], [[C3]](s32)
|
|
||||||
; SI: [[SEXT3:%[0-9]+]]:_(s64) = G_SEXT [[ASHR3]](s32)
|
|
||||||
; SI: [[AND11:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C4]]
|
|
||||||
; SI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND11]], [[C5]]
|
|
||||||
; SI: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
|
|
||||||
; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[LSHR6]], [[C6]]
|
|
||||||
; SI: [[SUB10:%[0-9]+]]:_(s32) = G_SUB [[SUB9]], [[C1]]
|
|
||||||
; SI: [[SUB11:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB9]]
|
|
||||||
; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[ZEXT3]], [[SUB10]](s32)
|
|
||||||
; SI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT3]], [[SUB11]](s32)
|
|
||||||
; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB9]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL3]], [[LSHR7]]
|
|
||||||
; SI: [[XOR4:%[0-9]+]]:_(s64) = G_XOR [[SELECT7]], [[SEXT3]]
|
|
||||||
; SI: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR4]](s64)
|
|
||||||
; SI: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT3]](s64)
|
|
||||||
; SI: [[USUBO6:%[0-9]+]]:_(s32), [[USUBO7:%[0-9]+]]:_(s1) = G_USUBO [[UV14]], [[UV16]]
|
|
||||||
; SI: [[USUBE6:%[0-9]+]]:_(s32), [[USUBE7:%[0-9]+]]:_(s1) = G_USUBE [[UV15]], [[UV17]], [[USUBO7]]
|
|
||||||
; SI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO6]](s32), [[USUBE6]](s32)
|
|
||||||
; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB9]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[C8]], [[MV3]]
|
|
||||||
; SI: [[XOR5:%[0-9]+]]:_(s64) = G_XOR [[SELECT8]], [[C10]]
|
|
||||||
; SI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[UV1]](s32), [[C9]]
|
|
||||||
; SI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[SELECT6]], [[XOR5]]
|
|
||||||
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT4]](s64), [[SELECT9]](s64)
|
|
||||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
||||||
; VI-LABEL: name: test_fptoui_v2s32_to_v2s64
|
; VI-LABEL: name: test_fptoui_v2s32_to_v2s64
|
||||||
; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
; VI: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C]]
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C4]]
|
; VI: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC1]], [[C]]
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
; VI: [[FFLOOR1:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL1]]
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR1]], [[C1]], [[INTRINSIC_TRUNC1]]
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
; VI: [[FPTOUI2:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR1]](s32)
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
; VI: [[FPTOUI3:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA1]](s32)
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI3]](s32), [[FPTOUI2]](s32)
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV2]], [[UV4]]
|
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV5]], [[USUBO1]]
|
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; VI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; VI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[C9]]
|
|
||||||
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; VI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; VI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; VI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; VI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; VI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; VI: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; VI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]]
|
|
||||||
; VI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO3]]
|
|
||||||
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; VI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; VI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[UV]](s32), [[C9]]
|
|
||||||
; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; VI: [[AND6:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C]]
|
|
||||||
; VI: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND6]], [[C1]](s32)
|
|
||||||
; VI: [[AND7:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
|
|
||||||
; VI: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[AND7]], [[C3]](s32)
|
|
||||||
; VI: [[SEXT2:%[0-9]+]]:_(s64) = G_SEXT [[ASHR2]](s32)
|
|
||||||
; VI: [[AND8:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C4]]
|
|
||||||
; VI: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[C5]]
|
|
||||||
; VI: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[OR2]](s32)
|
|
||||||
; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[LSHR4]], [[C6]]
|
|
||||||
; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[SUB6]], [[C1]]
|
|
||||||
; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB6]]
|
|
||||||
; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[ZEXT2]], [[SUB7]](s32)
|
|
||||||
; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT2]], [[SUB8]](s32)
|
|
||||||
; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB6]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL2]], [[LSHR5]]
|
|
||||||
; VI: [[XOR3:%[0-9]+]]:_(s64) = G_XOR [[SELECT5]], [[SEXT2]]
|
|
||||||
; VI: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR3]](s64)
|
|
||||||
; VI: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT2]](s64)
|
|
||||||
; VI: [[USUBO4:%[0-9]+]]:_(s32), [[USUBO5:%[0-9]+]]:_(s1) = G_USUBO [[UV10]], [[UV12]]
|
|
||||||
; VI: [[USUBE4:%[0-9]+]]:_(s32), [[USUBE5:%[0-9]+]]:_(s1) = G_USUBE [[UV11]], [[UV13]], [[USUBO5]]
|
|
||||||
; VI: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO4]](s32), [[USUBE4]](s32)
|
|
||||||
; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB6]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[C8]], [[MV2]]
|
|
||||||
; VI: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[C9]]
|
|
||||||
; VI: [[AND9:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C]]
|
|
||||||
; VI: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[AND9]], [[C1]](s32)
|
|
||||||
; VI: [[AND10:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C2]]
|
|
||||||
; VI: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[AND10]], [[C3]](s32)
|
|
||||||
; VI: [[SEXT3:%[0-9]+]]:_(s64) = G_SEXT [[ASHR3]](s32)
|
|
||||||
; VI: [[AND11:%[0-9]+]]:_(s32) = G_AND [[FSUB1]], [[C4]]
|
|
||||||
; VI: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND11]], [[C5]]
|
|
||||||
; VI: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[OR3]](s32)
|
|
||||||
; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[LSHR6]], [[C6]]
|
|
||||||
; VI: [[SUB10:%[0-9]+]]:_(s32) = G_SUB [[SUB9]], [[C1]]
|
|
||||||
; VI: [[SUB11:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB9]]
|
|
||||||
; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[ZEXT3]], [[SUB10]](s32)
|
|
||||||
; VI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT3]], [[SUB11]](s32)
|
|
||||||
; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB9]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL3]], [[LSHR7]]
|
|
||||||
; VI: [[XOR4:%[0-9]+]]:_(s64) = G_XOR [[SELECT7]], [[SEXT3]]
|
|
||||||
; VI: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR4]](s64)
|
|
||||||
; VI: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT3]](s64)
|
|
||||||
; VI: [[USUBO6:%[0-9]+]]:_(s32), [[USUBO7:%[0-9]+]]:_(s1) = G_USUBO [[UV14]], [[UV16]]
|
|
||||||
; VI: [[USUBE6:%[0-9]+]]:_(s32), [[USUBE7:%[0-9]+]]:_(s1) = G_USUBE [[UV15]], [[UV17]], [[USUBO7]]
|
|
||||||
; VI: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO6]](s32), [[USUBE6]](s32)
|
|
||||||
; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB9]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[C8]], [[MV3]]
|
|
||||||
; VI: [[XOR5:%[0-9]+]]:_(s64) = G_XOR [[SELECT8]], [[C10]]
|
|
||||||
; VI: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[UV1]](s32), [[C9]]
|
|
||||||
; VI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[SELECT6]], [[XOR5]]
|
|
||||||
; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT4]](s64), [[SELECT9]](s64)
|
|
||||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
|
||||||
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
|
||||||
%1:_(<2 x s64>) = G_FPTOUI %0
|
%1:_(<2 x s64>) = G_FPTOUI %0
|
||||||
@ -899,133 +605,29 @@ body: |
|
|||||||
|
|
||||||
; SI-LABEL: name: test_fptoui_s32_to_s33
|
; SI-LABEL: name: test_fptoui_s32_to_s33
|
||||||
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; SI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; SI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; SI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; SI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; SI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; SI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; SI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; SI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
|
||||||
; SI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
|
||||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
|
||||||
; SI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
|
||||||
; SI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
|
||||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; SI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; SI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
|
||||||
; SI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
|
||||||
; SI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; SI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; SI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; SI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; SI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C9]]
|
|
||||||
; SI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; SI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; SI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; SI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; SI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; SI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; SI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; SI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; SI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; SI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
|
|
||||||
; SI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO3]]
|
|
||||||
; SI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; SI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; SI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[COPY]](s32), [[C9]]
|
|
||||||
; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[SELECT4]](s64)
|
|
||||||
; SI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
; SI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
||||||
; VI-LABEL: name: test_fptoui_s32_to_s33
|
; VI-LABEL: name: test_fptoui_s32_to_s33
|
||||||
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
|
||||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2139095040
|
; VI: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
|
||||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
|
; VI: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3DF0000000000000
|
||||||
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
|
; VI: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0xC1F0000000000000
|
||||||
; VI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[C1]](s32)
|
; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INTRINSIC_TRUNC]], [[C]]
|
||||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
|
; VI: [[FFLOOR:%[0-9]+]]:_(s32) = G_FFLOOR [[FMUL]]
|
||||||
; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
|
; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FFLOOR]], [[C1]], [[INTRINSIC_TRUNC]]
|
||||||
; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
|
; VI: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[FFLOOR]](s32)
|
||||||
; VI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[AND1]], [[C3]](s32)
|
; VI: [[FPTOUI1:%[0-9]+]]:_(s32) = G_FPTOUI [[FMA]](s32)
|
||||||
; VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[ASHR]](s32)
|
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[FPTOUI1]](s32), [[FPTOUI]](s32)
|
||||||
; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388607
|
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
|
||||||
; VI: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
|
|
||||||
; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8388608
|
|
||||||
; VI: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[C5]]
|
|
||||||
; VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[OR]](s32)
|
|
||||||
; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
|
|
||||||
; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LSHR]], [[C6]]
|
|
||||||
; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C1]]
|
|
||||||
; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB]]
|
|
||||||
; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[SUB1]](s32)
|
|
||||||
; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT]], [[SUB2]](s32)
|
|
||||||
; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[LSHR1]]
|
|
||||||
; VI: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SELECT]], [[SEXT]]
|
|
||||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR]](s64)
|
|
||||||
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
|
|
||||||
; VI: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
|
|
||||||
; VI: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
|
|
||||||
; VI: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
|
|
||||||
; VI: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
|
|
||||||
; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB]](s32), [[C7]]
|
|
||||||
; VI: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
|
|
||||||
; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C8]], [[MV]]
|
|
||||||
; VI: [[C9:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x43E0000000000000
|
|
||||||
; VI: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[C9]]
|
|
||||||
; VI: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C]]
|
|
||||||
; VI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND3]], [[C1]](s32)
|
|
||||||
; VI: [[AND4:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C2]]
|
|
||||||
; VI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[AND4]], [[C3]](s32)
|
|
||||||
; VI: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[ASHR1]](s32)
|
|
||||||
; VI: [[AND5:%[0-9]+]]:_(s32) = G_AND [[FSUB]], [[C4]]
|
|
||||||
; VI: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND5]], [[C5]]
|
|
||||||
; VI: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[OR1]](s32)
|
|
||||||
; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[LSHR2]], [[C6]]
|
|
||||||
; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SUB3]], [[C1]]
|
|
||||||
; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SUB3]]
|
|
||||||
; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ZEXT1]], [[SUB4]](s32)
|
|
||||||
; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[ZEXT1]], [[SUB5]](s32)
|
|
||||||
; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB3]](s32), [[C1]]
|
|
||||||
; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL1]], [[LSHR3]]
|
|
||||||
; VI: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[SELECT2]], [[SEXT1]]
|
|
||||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[XOR1]](s64)
|
|
||||||
; VI: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT1]](s64)
|
|
||||||
; VI: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
|
|
||||||
; VI: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO3]]
|
|
||||||
; VI: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
|
|
||||||
; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB3]](s32), [[C7]]
|
|
||||||
; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[C8]], [[MV1]]
|
|
||||||
; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
|
|
||||||
; VI: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[SELECT3]], [[C10]]
|
|
||||||
; VI: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ult), [[COPY]](s32), [[C9]]
|
|
||||||
; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[SELECT1]], [[XOR2]]
|
|
||||||
; VI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[SELECT4]](s64)
|
|
||||||
; VI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
; VI: $vgpr0_vgpr1 = COPY [[COPY1]](s64)
|
||||||
%0:_(s32) = COPY $vgpr0
|
%0:_(s32) = COPY $vgpr0
|
||||||
%1:_(s33) = G_FPTOUI %0
|
%1:_(s33) = G_FPTOUI %0
|
||||||
|
@ -195,69 +195,43 @@ define amdgpu_kernel void @fp_to_sint_i64 (i64 addrspace(1)* %out, float %in) {
|
|||||||
; SI-NEXT: s_load_dword s0, s[0:1], 0xb
|
; SI-NEXT: s_load_dword s0, s[0:1], 0xb
|
||||||
; SI-NEXT: s_mov_b32 s7, 0xf000
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s6, -1
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_mov_b32 s1, 0
|
; SI-NEXT: s_mov_b32 s1, 0x2f800000
|
||||||
|
; SI-NEXT: s_mov_b32 s2, 0xcf800000
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s8, s0, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s0
|
||||||
; SI-NEXT: s_and_b32 s2, s0, 0x7fffff
|
; SI-NEXT: v_mul_f32_e64 v1, |v0|, s1
|
||||||
; SI-NEXT: s_ashr_i32 s9, s0, 31
|
; SI-NEXT: v_ashrrev_i32_e32 v2, 31, v0
|
||||||
; SI-NEXT: s_add_i32 s3, s8, 0xffffff6a
|
; SI-NEXT: v_floor_f32_e32 v1, v1
|
||||||
; SI-NEXT: s_or_b32 s0, s2, 0x800000
|
; SI-NEXT: v_cvt_u32_f32_e32 v3, v1
|
||||||
; SI-NEXT: s_sub_i32 s10, 0x96, s8
|
; SI-NEXT: v_fma_f32 v0, v1, s2, |v0|
|
||||||
; SI-NEXT: s_ashr_i32 s11, s9, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; SI-NEXT: s_lshl_b64 s[2:3], s[0:1], s3
|
; SI-NEXT: v_xor_b32_e32 v1, v3, v2
|
||||||
; SI-NEXT: s_lshr_b64 s[0:1], s[0:1], s10
|
; SI-NEXT: v_xor_b32_e32 v0, v0, v2
|
||||||
; SI-NEXT: s_addk_i32 s8, 0xff81
|
; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
|
||||||
; SI-NEXT: v_mov_b32_e32 v0, s11
|
; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v2, vcc
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, s1
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s3
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s8, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s0
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s2
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, s11, v1
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v2, s9, v2
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v2, vcc, s9, v2
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v0, vcc, v1, v0, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s8, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v2, 0, s[0:1]
|
|
||||||
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; VI-LABEL: fp_to_sint_i64:
|
; VI-LABEL: fp_to_sint_i64:
|
||||||
; VI: ; %bb.0: ; %entry
|
; VI: ; %bb.0: ; %entry
|
||||||
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
||||||
; VI-NEXT: s_load_dword s8, s[0:1], 0x2c
|
; VI-NEXT: s_load_dword s0, s[0:1], 0x2c
|
||||||
; VI-NEXT: s_mov_b32 s1, 0
|
; VI-NEXT: s_mov_b32 s2, 0x2f800000
|
||||||
|
; VI-NEXT: s_mov_b32 s3, 0xcf800000
|
||||||
; VI-NEXT: s_mov_b32 s7, 0xf000
|
; VI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; VI-NEXT: s_mov_b32 s6, -1
|
; VI-NEXT: s_mov_b32 s6, -1
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; VI-NEXT: s_bfe_u32 s9, s8, 0x80017
|
; VI-NEXT: v_trunc_f32_e32 v0, s0
|
||||||
; VI-NEXT: s_and_b32 s0, s8, 0x7fffff
|
; VI-NEXT: v_mul_f32_e64 v1, |v0|, s2
|
||||||
; VI-NEXT: s_add_i32 s2, s9, 0xffffff6a
|
; VI-NEXT: v_floor_f32_e32 v1, v1
|
||||||
; VI-NEXT: s_bitset1_b32 s0, 23
|
; VI-NEXT: v_fma_f32 v2, v1, s3, |v0|
|
||||||
; VI-NEXT: s_sub_i32 s10, 0x96, s9
|
; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
|
||||||
; VI-NEXT: s_lshl_b64 s[2:3], s[0:1], s2
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
|
||||||
; VI-NEXT: s_lshr_b64 s[0:1], s[0:1], s10
|
; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v0
|
||||||
; VI-NEXT: s_addk_i32 s9, 0xff81
|
; VI-NEXT: v_xor_b32_e32 v0, v2, v3
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s1
|
; VI-NEXT: v_xor_b32_e32 v1, v1, v3
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s3
|
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v3
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s9, 23
|
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s8, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s1, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v3, vcc, s0, v1
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v0, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[0:1], s9, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v3, 0, s[0:1]
|
|
||||||
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
@ -323,63 +297,31 @@ define amdgpu_kernel void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x f
|
|||||||
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
|
||||||
; SI-NEXT: s_mov_b32 s7, 0xf000
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s6, -1
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_movk_i32 s8, 0xff6a
|
; SI-NEXT: s_mov_b32 s2, 0x2f800000
|
||||||
; SI-NEXT: s_mov_b32 s2, 0x7fffff
|
; SI-NEXT: s_mov_b32 s3, 0xcf800000
|
||||||
; SI-NEXT: s_mov_b32 s10, 0x800000
|
|
||||||
; SI-NEXT: s_mov_b32 s3, 0
|
|
||||||
; SI-NEXT: s_movk_i32 s9, 0x96
|
|
||||||
; SI-NEXT: s_movk_i32 s11, 0xff81
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s12, s1, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
; SI-NEXT: s_and_b32 s13, s1, s2
|
; SI-NEXT: v_trunc_f32_e32 v1, s0
|
||||||
; SI-NEXT: s_ashr_i32 s14, s1, 31
|
; SI-NEXT: v_mul_f32_e64 v2, |v0|, s2
|
||||||
; SI-NEXT: s_bfe_u32 s1, s0, 0x80017
|
; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v0
|
||||||
; SI-NEXT: s_and_b32 s15, s0, s2
|
; SI-NEXT: v_mul_f32_e64 v4, |v1|, s2
|
||||||
; SI-NEXT: s_ashr_i32 s16, s0, 31
|
; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v1
|
||||||
; SI-NEXT: s_add_i32 s0, s12, s8
|
; SI-NEXT: v_floor_f32_e32 v2, v2
|
||||||
; SI-NEXT: s_or_b32 s2, s13, s10
|
; SI-NEXT: v_floor_f32_e32 v4, v4
|
||||||
; SI-NEXT: s_sub_i32 s13, s9, s12
|
; SI-NEXT: v_cvt_u32_f32_e32 v6, v2
|
||||||
; SI-NEXT: s_add_i32 s12, s12, s11
|
; SI-NEXT: v_fma_f32 v0, v2, s3, |v0|
|
||||||
; SI-NEXT: s_ashr_i32 s17, s14, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v4
|
||||||
; SI-NEXT: s_add_i32 s18, s1, s8
|
; SI-NEXT: v_fma_f32 v1, v4, s3, |v1|
|
||||||
; SI-NEXT: s_sub_i32 s19, s9, s1
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; SI-NEXT: s_add_i32 s11, s1, s11
|
; SI-NEXT: v_xor_b32_e32 v4, v6, v3
|
||||||
; SI-NEXT: s_ashr_i32 s20, s16, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v1, v1
|
||||||
; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], s0
|
; SI-NEXT: v_xor_b32_e32 v6, v2, v5
|
||||||
; SI-NEXT: s_lshr_b64 s[8:9], s[2:3], s13
|
; SI-NEXT: v_xor_b32_e32 v0, v0, v3
|
||||||
; SI-NEXT: v_mov_b32_e32 v0, s17
|
; SI-NEXT: v_xor_b32_e32 v1, v1, v5
|
||||||
; SI-NEXT: s_or_b32 s2, s15, s10
|
; SI-NEXT: v_sub_i32_e32 v2, vcc, v0, v3
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, s20
|
; SI-NEXT: v_subb_u32_e32 v3, vcc, v4, v3, vcc
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s9
|
; SI-NEXT: v_sub_i32_e32 v0, vcc, v1, v5
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s1
|
; SI-NEXT: v_subb_u32_e32 v1, vcc, v6, v5, vcc
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s12, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s8
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v4, s0
|
|
||||||
; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], s18
|
|
||||||
; SI-NEXT: s_lshr_b64 s[2:3], s[2:3], s19
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v2, s17, v2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v4, s3
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v5, s1
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s11, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v5, s2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, s0
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v3, s14, v3
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v4, s20, v4
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v6, vcc, s14, v3
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v0, vcc, v2, v0, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, s16, v5
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s12, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v6, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v0, vcc, s16, v5
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s11, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v1, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
@ -387,65 +329,33 @@ define amdgpu_kernel void @fp_to_sint_v2i64(<2 x i64> addrspace(1)* %out, <2 x f
|
|||||||
; VI: ; %bb.0:
|
; VI: ; %bb.0:
|
||||||
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
||||||
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
|
||||||
; VI-NEXT: s_mov_b32 s14, 0x7fffff
|
; VI-NEXT: s_mov_b32 s2, 0x2f800000
|
||||||
; VI-NEXT: s_movk_i32 s12, 0xff6a
|
; VI-NEXT: s_mov_b32 s3, 0xcf800000
|
||||||
; VI-NEXT: s_mov_b32 s15, 0x800000
|
|
||||||
; VI-NEXT: s_movk_i32 s16, 0x96
|
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
||||||
; VI-NEXT: s_bfe_u32 s13, s1, 0x80017
|
|
||||||
; VI-NEXT: s_and_b32 s2, s1, s14
|
|
||||||
; VI-NEXT: s_add_i32 s8, s13, s12
|
|
||||||
; VI-NEXT: s_or_b32 s2, s2, s15
|
|
||||||
; VI-NEXT: s_mov_b32 s3, 0
|
|
||||||
; VI-NEXT: s_sub_i32 s10, s16, s13
|
|
||||||
; VI-NEXT: s_movk_i32 s17, 0xff81
|
|
||||||
; VI-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
||||||
; VI-NEXT: s_lshr_b64 s[10:11], s[2:3], s10
|
|
||||||
; VI-NEXT: s_add_i32 s13, s13, s17
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s11
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s9
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s13, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s8
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s10
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s1, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s2, s1, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s1, v1
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s2, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
||||||
; VI-NEXT: s_and_b32 s2, s0, s14
|
|
||||||
; VI-NEXT: s_bfe_u32 s1, s0, 0x80017
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v0, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[8:9], s13, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[8:9]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[8:9]
|
|
||||||
; VI-NEXT: s_add_i32 s8, s1, s12
|
|
||||||
; VI-NEXT: s_or_b32 s2, s2, s15
|
|
||||||
; VI-NEXT: s_sub_i32 s10, s16, s1
|
|
||||||
; VI-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
||||||
; VI-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
||||||
; VI-NEXT: s_add_i32 s1, s1, s17
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s3
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s9
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s1, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s2
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s8
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s0, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s2, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v5, vcc, s0, v1
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s2, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s2
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v0, vcc, v0, v4, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[0:1], s1, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_mov_b32 s7, 0xf000
|
; VI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; VI-NEXT: s_mov_b32 s6, -1
|
; VI-NEXT: s_mov_b32 s6, -1
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v5, 0, s[0:1]
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v1, |v0|, s2
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v1, v1
|
||||||
|
; VI-NEXT: v_fma_f32 v2, v1, s3, |v0|
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v4, s0
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v3, |v4|, s2
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v3, v3
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
|
||||||
|
; VI-NEXT: v_fma_f32 v3, v3, s3, |v4|
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v0, 31, v0
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v6, v3
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v2, v2, v0
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v1, v1, v0
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v0
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v0, vcc
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v4
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v0, v6, v1
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v4, v5, v1
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
@ -546,113 +456,53 @@ define amdgpu_kernel void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x f
|
|||||||
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
|
||||||
; SI-NEXT: s_mov_b32 s7, 0xf000
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s6, -1
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_movk_i32 s10, 0xff6a
|
; SI-NEXT: s_mov_b32 s8, 0x2f800000
|
||||||
; SI-NEXT: s_mov_b32 s8, 0x7fffff
|
; SI-NEXT: s_mov_b32 s9, 0xcf800000
|
||||||
; SI-NEXT: s_mov_b32 s11, 0x800000
|
|
||||||
; SI-NEXT: s_mov_b32 s9, 0
|
|
||||||
; SI-NEXT: s_movk_i32 s12, 0x96
|
|
||||||
; SI-NEXT: s_movk_i32 s13, 0xff81
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s14, s1, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
; SI-NEXT: s_and_b32 s15, s1, s8
|
; SI-NEXT: v_trunc_f32_e32 v1, s0
|
||||||
; SI-NEXT: s_ashr_i32 s16, s1, 31
|
; SI-NEXT: v_trunc_f32_e32 v2, s3
|
||||||
; SI-NEXT: s_bfe_u32 s1, s0, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v3, s2
|
||||||
; SI-NEXT: s_and_b32 s17, s0, s8
|
; SI-NEXT: v_mul_f32_e64 v4, |v0|, s8
|
||||||
; SI-NEXT: s_ashr_i32 s18, s0, 31
|
; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v0
|
||||||
; SI-NEXT: s_bfe_u32 s0, s3, 0x80017
|
; SI-NEXT: v_mul_f32_e64 v6, |v1|, s8
|
||||||
; SI-NEXT: s_and_b32 s19, s3, s8
|
; SI-NEXT: v_ashrrev_i32_e32 v7, 31, v1
|
||||||
; SI-NEXT: s_ashr_i32 s20, s3, 31
|
; SI-NEXT: v_mul_f32_e64 v8, |v2|, s8
|
||||||
; SI-NEXT: s_bfe_u32 s3, s2, 0x80017
|
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v2
|
||||||
; SI-NEXT: s_and_b32 s21, s2, s8
|
; SI-NEXT: v_mul_f32_e64 v10, |v3|, s8
|
||||||
; SI-NEXT: s_ashr_i32 s22, s2, 31
|
; SI-NEXT: v_ashrrev_i32_e32 v11, 31, v3
|
||||||
; SI-NEXT: s_add_i32 s2, s14, s10
|
; SI-NEXT: v_floor_f32_e32 v4, v4
|
||||||
; SI-NEXT: s_or_b32 s8, s15, s11
|
; SI-NEXT: v_floor_f32_e32 v6, v6
|
||||||
; SI-NEXT: s_sub_i32 s15, s12, s14
|
; SI-NEXT: v_floor_f32_e32 v8, v8
|
||||||
; SI-NEXT: s_add_i32 s14, s14, s13
|
; SI-NEXT: v_floor_f32_e32 v10, v10
|
||||||
; SI-NEXT: s_ashr_i32 s23, s16, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v12, v4
|
||||||
; SI-NEXT: s_add_i32 s24, s1, s10
|
; SI-NEXT: v_fma_f32 v0, v4, s9, |v0|
|
||||||
; SI-NEXT: s_sub_i32 s25, s12, s1
|
; SI-NEXT: v_cvt_u32_f32_e32 v4, v6
|
||||||
; SI-NEXT: s_add_i32 s26, s1, s13
|
; SI-NEXT: v_fma_f32 v1, v6, s9, |v1|
|
||||||
; SI-NEXT: s_ashr_i32 s27, s18, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v6, v8
|
||||||
; SI-NEXT: s_add_i32 s28, s0, s10
|
; SI-NEXT: v_fma_f32 v2, v8, s9, |v2|
|
||||||
; SI-NEXT: s_sub_i32 s29, s12, s0
|
; SI-NEXT: v_cvt_u32_f32_e32 v8, v10
|
||||||
; SI-NEXT: s_add_i32 s30, s0, s13
|
; SI-NEXT: v_fma_f32 v3, v10, s9, |v3|
|
||||||
; SI-NEXT: s_ashr_i32 s31, s20, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; SI-NEXT: s_add_i32 s10, s3, s10
|
; SI-NEXT: v_xor_b32_e32 v10, v12, v5
|
||||||
; SI-NEXT: s_sub_i32 s12, s12, s3
|
; SI-NEXT: v_cvt_u32_f32_e32 v1, v1
|
||||||
; SI-NEXT: s_add_i32 s13, s3, s13
|
; SI-NEXT: v_xor_b32_e32 v4, v4, v7
|
||||||
; SI-NEXT: s_ashr_i32 s33, s22, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
|
||||||
; SI-NEXT: s_lshl_b64 s[0:1], s[8:9], s2
|
; SI-NEXT: v_xor_b32_e32 v12, v6, v9
|
||||||
; SI-NEXT: s_lshr_b64 s[2:3], s[8:9], s15
|
; SI-NEXT: v_cvt_u32_f32_e32 v3, v3
|
||||||
; SI-NEXT: v_mov_b32_e32 v0, s23
|
; SI-NEXT: v_xor_b32_e32 v8, v8, v11
|
||||||
; SI-NEXT: s_or_b32 s8, s17, s11
|
; SI-NEXT: v_xor_b32_e32 v0, v0, v5
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, s27
|
; SI-NEXT: v_xor_b32_e32 v1, v1, v7
|
||||||
; SI-NEXT: v_mov_b32_e32 v4, s31
|
; SI-NEXT: v_xor_b32_e32 v6, v2, v9
|
||||||
; SI-NEXT: v_mov_b32_e32 v5, s33
|
; SI-NEXT: v_xor_b32_e32 v13, v3, v11
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s3
|
; SI-NEXT: v_sub_i32_e32 v2, vcc, v0, v5
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s1
|
; SI-NEXT: v_subb_u32_e32 v3, vcc, v10, v5, vcc
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s14, 23
|
; SI-NEXT: v_sub_i32_e32 v0, vcc, v1, v7
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
; SI-NEXT: v_subb_u32_e32 v1, vcc, v4, v7, vcc
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s2
|
; SI-NEXT: v_sub_i32_e32 v6, vcc, v6, v9
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, s0
|
; SI-NEXT: v_subb_u32_e32 v7, vcc, v12, v9, vcc
|
||||||
; SI-NEXT: s_lshl_b64 s[0:1], s[8:9], s24
|
; SI-NEXT: v_sub_i32_e32 v4, vcc, v13, v11
|
||||||
; SI-NEXT: s_lshr_b64 s[2:3], s[8:9], s25
|
; SI-NEXT: v_subb_u32_e32 v5, vcc, v8, v11, vcc
|
||||||
; SI-NEXT: s_or_b32 s8, s19, s11
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v2, s23, v2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, s3
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v7, s1
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s26, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v7, s2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v8, s0
|
|
||||||
; SI-NEXT: s_lshl_b64 s[0:1], s[8:9], s28
|
|
||||||
; SI-NEXT: s_lshr_b64 s[2:3], s[8:9], s29
|
|
||||||
; SI-NEXT: s_or_b32 s8, s21, s11
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v3, s16, v3
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v6, s27, v6
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v8, s3
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v9, s1
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s30, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v9, s2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v10, s0
|
|
||||||
; SI-NEXT: s_lshl_b64 s[2:3], s[8:9], s10
|
|
||||||
; SI-NEXT: s_lshr_b64 s[8:9], s[8:9], s12
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v11, s[0:1], s16, v3
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v0, s[0:1], v2, v0, s[0:1]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v7, s18, v7
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v9, v9, v10, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, s31, v8
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s9
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s3
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s13, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v10, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v12, s8
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v13, s2
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s14, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v11, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v0, s[0:1], s18, v7
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v1, s[0:1], v6, v1, s[0:1]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v6, s20, v9
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v7, v12, v13, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v9, s33, v10
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s26, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v1, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v6, vcc, s20, v6
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v4, vcc, v8, v4, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, s22, v7
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s30, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v7, v4, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v4, vcc, s22, v8
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v5, vcc, v9, v5, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s13, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v5, v5, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[0:1]
|
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
; SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
@ -661,115 +511,55 @@ define amdgpu_kernel void @fp_to_sint_v4i64(<4 x i64> addrspace(1)* %out, <4 x f
|
|||||||
; VI: ; %bb.0:
|
; VI: ; %bb.0:
|
||||||
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
||||||
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
|
||||||
; VI-NEXT: s_mov_b32 s16, 0x7fffff
|
; VI-NEXT: s_mov_b32 s8, 0x2f800000
|
||||||
; VI-NEXT: s_movk_i32 s14, 0xff6a
|
; VI-NEXT: s_mov_b32 s9, 0xcf800000
|
||||||
; VI-NEXT: s_mov_b32 s17, 0x800000
|
|
||||||
; VI-NEXT: s_movk_i32 s18, 0x96
|
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
||||||
; VI-NEXT: s_bfe_u32 s15, s1, 0x80017
|
|
||||||
; VI-NEXT: s_and_b32 s8, s1, s16
|
|
||||||
; VI-NEXT: s_add_i32 s10, s15, s14
|
|
||||||
; VI-NEXT: s_or_b32 s8, s8, s17
|
|
||||||
; VI-NEXT: s_mov_b32 s9, 0
|
|
||||||
; VI-NEXT: s_sub_i32 s12, s18, s15
|
|
||||||
; VI-NEXT: s_movk_i32 s19, 0xff81
|
|
||||||
; VI-NEXT: s_lshl_b64 s[10:11], s[8:9], s10
|
|
||||||
; VI-NEXT: s_lshr_b64 s[12:13], s[8:9], s12
|
|
||||||
; VI-NEXT: s_add_i32 s15, s15, s19
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s13
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s11
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s15, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s10
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s12
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s1, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s8, s1, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s1, v1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s1, v1
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s8, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s8
|
|
||||||
; VI-NEXT: s_and_b32 s8, s0, s16
|
|
||||||
; VI-NEXT: s_bfe_u32 s1, s0, 0x80017
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v0, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[10:11], s15, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[10:11]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[10:11]
|
|
||||||
; VI-NEXT: s_add_i32 s10, s1, s14
|
|
||||||
; VI-NEXT: s_or_b32 s8, s8, s17
|
|
||||||
; VI-NEXT: s_sub_i32 s12, s18, s1
|
|
||||||
; VI-NEXT: s_lshl_b64 s[10:11], s[8:9], s10
|
|
||||||
; VI-NEXT: s_lshr_b64 s[12:13], s[8:9], s12
|
|
||||||
; VI-NEXT: s_add_i32 s1, s1, s19
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s13
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s11
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s1, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s12
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s10
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s0, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s8, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v5, vcc, s0, v1
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s8, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s8
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v0, vcc, v0, v4, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[0:1], s1, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v0, 0, s[0:1]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v5, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_bfe_u32 s12, s3, 0x80017
|
|
||||||
; VI-NEXT: s_and_b32 s1, s3, s16
|
|
||||||
; VI-NEXT: s_add_i32 s0, s12, s14
|
|
||||||
; VI-NEXT: s_or_b32 s8, s1, s17
|
|
||||||
; VI-NEXT: s_sub_i32 s10, s18, s12
|
|
||||||
; VI-NEXT: s_lshl_b64 s[0:1], s[8:9], s0
|
|
||||||
; VI-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
|
|
||||||
; VI-NEXT: s_add_i32 s12, s12, s19
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s11
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s12, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v6, s0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v5, s10
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s3, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v5, v5, v6, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v5, s0, v5
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s1, v4
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v6, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v5, vcc, s0, v5
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v4, vcc, v4, v6, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[0:1], s12, 0
|
|
||||||
; VI-NEXT: s_bfe_u32 s3, s2, 0x80017
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v7, v4, 0, s[0:1]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v6, v5, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_and_b32 s1, s2, s16
|
|
||||||
; VI-NEXT: s_add_i32 s0, s3, s14
|
|
||||||
; VI-NEXT: s_or_b32 s8, s1, s17
|
|
||||||
; VI-NEXT: s_sub_i32 s10, s18, s3
|
|
||||||
; VI-NEXT: s_lshl_b64 s[0:1], s[8:9], s0
|
|
||||||
; VI-NEXT: s_lshr_b64 s[8:9], s[8:9], s10
|
|
||||||
; VI-NEXT: s_add_i32 s3, s3, s19
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s9
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v5, s1
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s3, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v8, s0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v5, s8
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s2, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v5, s0, v5
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s1, v4
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v8, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v9, vcc, s0, v5
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v4, vcc, v4, v8, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[0:1], s3, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v5, v4, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_mov_b32 s7, 0xf000
|
; VI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; VI-NEXT: s_mov_b32 s6, -1
|
; VI-NEXT: s_mov_b32 s6, -1
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v9, 0, s[0:1]
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v1, |v0|, s8
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v1, v1
|
||||||
|
; VI-NEXT: v_fma_f32 v2, v1, s9, |v0|
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v4, s0
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v3, |v4|, s8
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v3, v3
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v0, 31, v0
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
|
||||||
|
; VI-NEXT: v_fma_f32 v3, v3, s9, |v4|
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v2, v2, v0
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v6, v3
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v1, v1, v0
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v0
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v0, vcc
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v4
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v4, v5, v1
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v5, s3
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v0, v6, v1
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v6, |v5|, s8
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v6, v6
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v7, v6
|
||||||
|
; VI-NEXT: v_fma_f32 v6, v6, s9, |v5|
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v6, v6
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v4, 31, v5
|
||||||
|
; VI-NEXT: v_trunc_f32_e32 v8, s2
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v5, v6, v4
|
||||||
|
; VI-NEXT: v_mul_f32_e64 v6, |v8|, s8
|
||||||
|
; VI-NEXT: v_floor_f32_e32 v6, v6
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v9, v6
|
||||||
|
; VI-NEXT: v_fma_f32 v6, v6, s9, |v8|
|
||||||
|
; VI-NEXT: v_cvt_u32_f32_e32 v10, v6
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v6, vcc, v5, v4
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v7, v7, v4
|
||||||
|
; VI-NEXT: v_ashrrev_i32_e32 v5, 31, v8
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v7, vcc, v7, v4, vcc
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v4, v10, v5
|
||||||
|
; VI-NEXT: v_xor_b32_e32 v8, v9, v5
|
||||||
|
; VI-NEXT: v_sub_u32_e32 v4, vcc, v4, v5
|
||||||
|
; VI-NEXT: v_subb_u32_e32 v5, vcc, v8, v5, vcc
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
; VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
|
@ -151,137 +151,35 @@ define amdgpu_kernel void @fp_to_uint_f32_to_i64(i64 addrspace(1)* %out, float %
|
|||||||
; SI-LABEL: fp_to_uint_f32_to_i64:
|
; SI-LABEL: fp_to_uint_f32_to_i64:
|
||||||
; SI: ; %bb.0:
|
; SI: ; %bb.0:
|
||||||
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
|
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
|
||||||
; SI-NEXT: s_load_dword s8, s[0:1], 0xb
|
; SI-NEXT: s_load_dword s0, s[0:1], 0xb
|
||||||
; SI-NEXT: s_mov_b32 s7, 0xf000
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s6, -1
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_movk_i32 s9, 0xff6a
|
; SI-NEXT: s_mov_b32 s1, 0xcf800000
|
||||||
; SI-NEXT: s_mov_b32 s2, 0x7fffff
|
|
||||||
; SI-NEXT: s_mov_b32 s10, 0x800000
|
|
||||||
; SI-NEXT: s_mov_b32 s1, 0
|
|
||||||
; SI-NEXT: s_movk_i32 s11, 0x96
|
|
||||||
; SI-NEXT: s_movk_i32 s12, 0xff81
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v4, 0x5f000000
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, 0
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s3, s8, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s0
|
||||||
; SI-NEXT: s_and_b32 s0, s8, s2
|
; SI-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
||||||
; SI-NEXT: s_ashr_i32 s13, s8, 31
|
; SI-NEXT: v_floor_f32_e32 v2, v1
|
||||||
; SI-NEXT: v_sub_f32_e32 v0, s8, v4
|
; SI-NEXT: v_cvt_u32_f32_e32 v1, v2
|
||||||
; SI-NEXT: s_add_i32 s14, s3, s9
|
; SI-NEXT: v_fma_f32 v0, v2, s1, v0
|
||||||
; SI-NEXT: s_or_b32 s0, s0, s10
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; SI-NEXT: s_sub_i32 s15, s11, s3
|
|
||||||
; SI-NEXT: s_add_i32 s16, s3, s12
|
|
||||||
; SI-NEXT: s_ashr_i32 s17, s13, 31
|
|
||||||
; SI-NEXT: v_bfe_u32 v2, v0, 23, 8
|
|
||||||
; SI-NEXT: v_and_b32_e32 v3, s2, v0
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v0
|
|
||||||
; SI-NEXT: s_lshl_b64 s[2:3], s[0:1], s14
|
|
||||||
; SI-NEXT: s_lshr_b64 s[0:1], s[0:1], s15
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, s17
|
|
||||||
; SI-NEXT: v_add_i32_e32 v7, vcc, s9, v2
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s10, v3
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v8, vcc, s11, v2
|
|
||||||
; SI-NEXT: v_add_i32_e32 v9, vcc, s12, v2
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v10, 31, v5
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s1
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s3
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s16, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v11, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v12, s0
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v13, s2
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v7
|
|
||||||
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], v8
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v7, v12, v13, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, s17, v11
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v9
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v2, s13, v7
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v0, v0, v5
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, v1, v10
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v2, vcc, s13, v2
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v3, vcc, v8, v6, vcc
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[0:1], s16, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v9
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e32 vcc, s8, v4
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; VI-LABEL: fp_to_uint_f32_to_i64:
|
; VI-LABEL: fp_to_uint_f32_to_i64:
|
||||||
; VI: ; %bb.0:
|
; VI: ; %bb.0:
|
||||||
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
||||||
; VI-NEXT: s_load_dword s9, s[0:1], 0x2c
|
; VI-NEXT: s_mov_b32 s3, 0xcf800000
|
||||||
; VI-NEXT: s_mov_b32 s11, 0x7fffff
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
||||||
; VI-NEXT: s_movk_i32 s8, 0xff6a
|
|
||||||
; VI-NEXT: s_mov_b32 s12, 0x800000
|
|
||||||
; VI-NEXT: s_movk_i32 s13, 0x96
|
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; VI-NEXT: s_bfe_u32 s10, s9, 0x80017
|
; VI-NEXT: v_trunc_f32_e32 v0, s2
|
||||||
; VI-NEXT: s_and_b32 s0, s9, s11
|
; VI-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
|
||||||
; VI-NEXT: s_add_i32 s2, s10, s8
|
; VI-NEXT: v_floor_f32_e32 v2, v1
|
||||||
; VI-NEXT: s_or_b32 s0, s0, s12
|
; VI-NEXT: v_fma_f32 v0, v2, s3, v0
|
||||||
; VI-NEXT: s_mov_b32 s1, 0
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v2
|
||||||
; VI-NEXT: s_sub_i32 s14, s13, s10
|
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; VI-NEXT: s_lshl_b64 s[2:3], s[0:1], s2
|
; VI-NEXT: s_mov_b32 s3, 0xf000
|
||||||
; VI-NEXT: s_lshr_b64 s[0:1], s[0:1], s14
|
; VI-NEXT: s_mov_b32 s2, -1
|
||||||
; VI-NEXT: s_movk_i32 s14, 0xff81
|
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
|
||||||
; VI-NEXT: s_add_i32 s10, s10, s14
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s1
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s3
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s10, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s9, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v6, 0x5f000000
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v7, s9, v6
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s1, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v4, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_bfe_u32 v8, v7, 23, 8
|
|
||||||
; VI-NEXT: v_and_b32_e32 v0, s11, v7
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[2:3], s10, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v5, v1, 0, s[2:3]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v2, vcc, s8, v8
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v9, vcc, s13, v8
|
|
||||||
; VI-NEXT: v_or_b32_e32 v0, s12, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, 0
|
|
||||||
; VI-NEXT: v_add_u32_e32 v8, vcc, s14, v8
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[0:1], v9, v[0:1]
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v2, 31, v7
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v2
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, v1, v3
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[0:1], s9, v6
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[2:3]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
|
|
||||||
; VI-NEXT: s_mov_b32 s7, 0xf000
|
|
||||||
; VI-NEXT: s_mov_b32 s6, -1
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[0:1]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1]
|
|
||||||
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
|
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; EG-LABEL: fp_to_uint_f32_to_i64:
|
; EG-LABEL: fp_to_uint_f32_to_i64:
|
||||||
@ -342,239 +240,49 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %ou
|
|||||||
; SI-LABEL: fp_to_uint_v2f32_to_v2i64:
|
; SI-LABEL: fp_to_uint_v2f32_to_v2i64:
|
||||||
; SI: ; %bb.0:
|
; SI: ; %bb.0:
|
||||||
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
|
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
|
||||||
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
|
||||||
; SI-NEXT: s_mov_b32 s7, 0xf000
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s6, -1
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_movk_i32 s12, 0xff6a
|
; SI-NEXT: s_mov_b32 s2, 0x2f800000
|
||||||
; SI-NEXT: s_mov_b32 s8, 0x7fffff
|
; SI-NEXT: s_mov_b32 s3, 0xcf800000
|
||||||
; SI-NEXT: s_mov_b32 s13, 0x800000
|
|
||||||
; SI-NEXT: s_mov_b32 s1, 0
|
|
||||||
; SI-NEXT: s_movk_i32 s14, 0x96
|
|
||||||
; SI-NEXT: s_movk_i32 s15, 0xff81
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, 0x5f000000
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, 0
|
|
||||||
; SI-NEXT: s_brev_b32 s16, 1
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s9, s3, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
; SI-NEXT: s_and_b32 s0, s3, s8
|
; SI-NEXT: v_trunc_f32_e32 v2, s0
|
||||||
; SI-NEXT: s_ashr_i32 s17, s3, 31
|
; SI-NEXT: v_mul_f32_e32 v1, s2, v0
|
||||||
; SI-NEXT: v_sub_f32_e32 v0, s3, v6
|
; SI-NEXT: v_mul_f32_e32 v3, s2, v2
|
||||||
; SI-NEXT: s_bfe_u32 s10, s2, 0x80017
|
; SI-NEXT: v_floor_f32_e32 v4, v1
|
||||||
; SI-NEXT: s_and_b32 s18, s2, s8
|
; SI-NEXT: v_floor_f32_e32 v5, v3
|
||||||
; SI-NEXT: s_ashr_i32 s19, s2, 31
|
; SI-NEXT: v_cvt_u32_f32_e32 v3, v4
|
||||||
; SI-NEXT: v_sub_f32_e32 v2, s2, v6
|
; SI-NEXT: v_cvt_u32_f32_e32 v1, v5
|
||||||
; SI-NEXT: s_add_i32 s11, s9, s12
|
; SI-NEXT: v_fma_f32 v0, v4, s3, v0
|
||||||
; SI-NEXT: s_or_b32 s0, s0, s13
|
; SI-NEXT: v_fma_f32 v4, v5, s3, v2
|
||||||
; SI-NEXT: s_sub_i32 s20, s14, s9
|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v0
|
||||||
; SI-NEXT: s_add_i32 s21, s9, s15
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v4
|
||||||
; SI-NEXT: s_ashr_i32 s22, s17, 31
|
|
||||||
; SI-NEXT: v_bfe_u32 v3, v0, 23, 8
|
|
||||||
; SI-NEXT: v_and_b32_e32 v4, s8, v0
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v7, 31, v0
|
|
||||||
; SI-NEXT: s_add_i32 s23, s10, s12
|
|
||||||
; SI-NEXT: s_sub_i32 s24, s14, s10
|
|
||||||
; SI-NEXT: s_add_i32 s25, s10, s15
|
|
||||||
; SI-NEXT: s_ashr_i32 s26, s19, 31
|
|
||||||
; SI-NEXT: v_bfe_u32 v5, v2, 23, 8
|
|
||||||
; SI-NEXT: v_and_b32_e32 v8, s8, v2
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v2
|
|
||||||
; SI-NEXT: s_lshl_b64 s[8:9], s[0:1], s11
|
|
||||||
; SI-NEXT: s_lshr_b64 s[10:11], s[0:1], s20
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v10, s22
|
|
||||||
; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v3
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s13, v4
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v4, vcc, s14, v3
|
|
||||||
; SI-NEXT: v_add_i32_e32 v11, vcc, s15, v3
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v12, 31, v7
|
|
||||||
; SI-NEXT: s_or_b32 s0, s18, s13
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v13, s26
|
|
||||||
; SI-NEXT: v_add_i32_e32 v14, vcc, s12, v5
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v15, vcc, s14, v5
|
|
||||||
; SI-NEXT: v_add_i32_e32 v16, vcc, s15, v5
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v17, 31, v9
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s11
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v5, s9
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s21, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v18, v3, v5, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v19, s10
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
|
|
||||||
; SI-NEXT: v_lshr_b64 v[4:5], v[0:1], v4
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s13, v8
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v8, s8
|
|
||||||
; SI-NEXT: s_lshl_b64 s[8:9], s[0:1], s23
|
|
||||||
; SI-NEXT: s_lshr_b64 s[0:1], s[0:1], s24
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v8, v19, v8, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v18, s22, v18
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v11
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s1
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s9
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s25, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v19, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v20, s0
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v14
|
|
||||||
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], v15
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v14, s8
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, s17, v8
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v4, v4, v7
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, v5, v12
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v14, v20, v14, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v15, s26, v19
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v16
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v2, vcc, s17, v8
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v3, vcc, v18, v10, vcc
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v4, vcc, v4, v7
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v5, vcc, v5, v12, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v7, s19, v14
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v0, v0, v9
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, v1, v17
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[8:9], s21, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[8:9]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v11
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v7, s[0:1], s19, v7
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v8, s[0:1], v15, v13, s[0:1]
|
|
||||||
; SI-NEXT: v_sub_i32_e64 v0, s[0:1], v0, v9
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v1, s[0:1], v1, v17, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[8:9]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e32 vcc, s3, v6
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[8:9], s25, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v7, 0, s[8:9]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v16
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, s16, v5
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v7, v8, 0, s[8:9]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v1, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s2, v6
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, s16, v1
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v1, v7, s[0:1]
|
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; VI-LABEL: fp_to_uint_v2f32_to_v2i64:
|
; VI-LABEL: fp_to_uint_v2f32_to_v2i64:
|
||||||
; VI: ; %bb.0:
|
; VI: ; %bb.0:
|
||||||
; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
|
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c
|
||||||
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c
|
; VI-NEXT: s_mov_b32 s4, 0x2f800000
|
||||||
; VI-NEXT: s_mov_b32 s17, 0x7fffff
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
||||||
; VI-NEXT: s_movk_i32 s16, 0xff6a
|
|
||||||
; VI-NEXT: s_mov_b32 s18, 0x800000
|
|
||||||
; VI-NEXT: s_movk_i32 s19, 0x96
|
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; VI-NEXT: s_bfe_u32 s12, s5, 0x80017
|
; VI-NEXT: v_trunc_f32_e32 v0, s3
|
||||||
; VI-NEXT: s_and_b32 s1, s5, s17
|
; VI-NEXT: v_trunc_f32_e32 v4, s2
|
||||||
; VI-NEXT: s_add_i32 s0, s12, s16
|
; VI-NEXT: v_mul_f32_e32 v1, s4, v0
|
||||||
; VI-NEXT: s_or_b32 s6, s1, s18
|
; VI-NEXT: v_mul_f32_e32 v2, s4, v4
|
||||||
; VI-NEXT: s_mov_b32 s7, 0
|
; VI-NEXT: v_floor_f32_e32 v5, v1
|
||||||
; VI-NEXT: s_sub_i32 s2, s19, s12
|
; VI-NEXT: s_mov_b32 s2, 0xcf800000
|
||||||
; VI-NEXT: s_movk_i32 s20, 0xff81
|
; VI-NEXT: v_floor_f32_e32 v6, v2
|
||||||
; VI-NEXT: s_lshl_b64 s[0:1], s[6:7], s0
|
; VI-NEXT: v_fma_f32 v0, v5, s2, v0
|
||||||
; VI-NEXT: s_lshr_b64 s[2:3], s[6:7], s2
|
; VI-NEXT: v_cvt_u32_f32_e32 v2, v0
|
||||||
; VI-NEXT: s_add_i32 s12, s12, s20
|
; VI-NEXT: v_fma_f32 v0, v6, s2, v4
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s12, 23
|
; VI-NEXT: v_cvt_u32_f32_e32 v3, v5
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s3
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v6
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s1
|
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
; VI-NEXT: s_mov_b32 s3, 0xf000
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s0
|
; VI-NEXT: s_mov_b32 s2, -1
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s2
|
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
|
||||||
; VI-NEXT: s_ashr_i32 s0, s5, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v8, 0x5f000000
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v9, s5, v8
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s1, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v6, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_bfe_u32 v10, v9, 23, 8
|
|
||||||
; VI-NEXT: v_and_b32_e32 v0, s17, v9
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[12:13], s12, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[12:13]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v2, vcc, s16, v10
|
|
||||||
; VI-NEXT: v_or_b32_e32 v0, s18, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, 0
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v4, vcc, s19, v10
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[2:3], v2, v[0:1]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[4:5], v4, v[0:1]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v0, vcc, s20, v10
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v4, 31, v9
|
|
||||||
; VI-NEXT: s_and_b32 s6, s4, s17
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v2, v2, v4
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v5, 31, v4
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[2:3], s5, v8
|
|
||||||
; VI-NEXT: s_bfe_u32 s5, s4, 0x80017
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v3, v3, v5
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v4
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v0
|
|
||||||
; VI-NEXT: s_add_i32 s14, s5, s16
|
|
||||||
; VI-NEXT: s_or_b32 s6, s6, s18
|
|
||||||
; VI-NEXT: s_sub_i32 s21, s19, s5
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v2, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_lshl_b64 s[14:15], s[6:7], s14
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v5, vcc, v3, v5, vcc
|
|
||||||
; VI-NEXT: s_add_i32 s5, s5, s20
|
|
||||||
; VI-NEXT: s_lshr_b64 s[6:7], s[6:7], s21
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v2, v0, v7, s[2:3]
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s7
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v3, s15
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s5, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v3, s6
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s14
|
|
||||||
; VI-NEXT: s_ashr_i32 s6, s4, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s7, s6, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v3, s6, v3
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v10, s4, v8
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s7, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s7
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v3, vcc, s6, v3
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[6:7], s5, 0
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v7, vcc, v0, v4, vcc
|
|
||||||
; VI-NEXT: v_and_b32_e32 v0, s17, v10
|
|
||||||
; VI-NEXT: v_bfe_u32 v11, v10, 23, 8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v9, v3, 0, s[6:7]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v3, vcc, s16, v11
|
|
||||||
; VI-NEXT: v_or_b32_e32 v0, s18, v0
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v12, vcc, s19, v11
|
|
||||||
; VI-NEXT: v_add_u32_e32 v11, vcc, s20, v11
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[3:4], v3, v[0:1]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[0:1], v12, v[0:1]
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v11
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v10
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, v0, v3
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v3
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, v1, v4
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v11
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v5, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_brev_b32 s0, 1
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v6, 0, s[12:13]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s0, v4
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[2:3]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[4:5], s4, v8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v7, 0, s[6:7]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
|
||||||
; VI-NEXT: s_mov_b32 s11, 0xf000
|
|
||||||
; VI-NEXT: s_mov_b32 s10, -1
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[4:5]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[4:5]
|
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
|
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
|
; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
|
||||||
@ -670,443 +378,76 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(<2 x i64> addrspace(1)* %ou
|
|||||||
define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
|
define amdgpu_kernel void @fp_to_uint_v4f32_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x float> %x) {
|
||||||
; SI-LABEL: fp_to_uint_v4f32_to_v4i64:
|
; SI-LABEL: fp_to_uint_v4f32_to_v4i64:
|
||||||
; SI: ; %bb.0:
|
; SI: ; %bb.0:
|
||||||
; SI-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0xd
|
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
|
||||||
; SI-NEXT: s_movk_i32 s6, 0xff6a
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
|
||||||
; SI-NEXT: s_mov_b32 s7, 0x7fffff
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; SI-NEXT: s_mov_b32 s18, 0x800000
|
; SI-NEXT: s_mov_b32 s6, -1
|
||||||
; SI-NEXT: s_mov_b32 s13, 0
|
; SI-NEXT: s_mov_b32 s8, 0x2f800000
|
||||||
; SI-NEXT: s_movk_i32 s19, 0x96
|
; SI-NEXT: s_mov_b32 s9, 0xcf800000
|
||||||
; SI-NEXT: s_movk_i32 s20, 0xff81
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v7, 0x5f000000
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v1, 0
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; SI-NEXT: s_bfe_u32 s2, s9, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
; SI-NEXT: s_and_b32 s3, s9, s7
|
; SI-NEXT: v_trunc_f32_e32 v2, s0
|
||||||
; SI-NEXT: v_sub_f32_e32 v6, s9, v7
|
; SI-NEXT: v_trunc_f32_e32 v4, s3
|
||||||
; SI-NEXT: s_bfe_u32 s4, s8, 0x80017
|
; SI-NEXT: v_trunc_f32_e32 v6, s2
|
||||||
; SI-NEXT: s_and_b32 s14, s8, s7
|
; SI-NEXT: v_mul_f32_e32 v1, s8, v0
|
||||||
; SI-NEXT: v_sub_f32_e32 v8, s8, v7
|
; SI-NEXT: v_mul_f32_e32 v3, s8, v2
|
||||||
; SI-NEXT: v_sub_f32_e32 v9, s11, v7
|
; SI-NEXT: v_mul_f32_e32 v5, s8, v4
|
||||||
; SI-NEXT: v_sub_f32_e32 v10, s10, v7
|
; SI-NEXT: v_mul_f32_e32 v7, s8, v6
|
||||||
; SI-NEXT: s_add_i32 s5, s2, s6
|
; SI-NEXT: v_floor_f32_e32 v8, v1
|
||||||
; SI-NEXT: s_or_b32 s12, s3, s18
|
; SI-NEXT: v_floor_f32_e32 v9, v3
|
||||||
; SI-NEXT: s_sub_i32 s15, s19, s2
|
; SI-NEXT: v_floor_f32_e32 v10, v5
|
||||||
; SI-NEXT: s_add_i32 s21, s2, s20
|
; SI-NEXT: v_floor_f32_e32 v11, v7
|
||||||
; SI-NEXT: v_bfe_u32 v2, v6, 23, 8
|
; SI-NEXT: v_cvt_u32_f32_e32 v3, v8
|
||||||
; SI-NEXT: v_and_b32_e32 v0, s7, v6
|
; SI-NEXT: v_cvt_u32_f32_e32 v1, v9
|
||||||
; SI-NEXT: s_add_i32 s16, s4, s6
|
; SI-NEXT: v_fma_f32 v0, v8, s9, v0
|
||||||
; SI-NEXT: s_sub_i32 s17, s19, s4
|
; SI-NEXT: v_fma_f32 v8, v9, s9, v2
|
||||||
; SI-NEXT: s_add_i32 s22, s4, s20
|
; SI-NEXT: v_cvt_u32_f32_e32 v7, v10
|
||||||
; SI-NEXT: v_bfe_u32 v3, v8, 23, 8
|
; SI-NEXT: v_cvt_u32_f32_e32 v5, v11
|
||||||
; SI-NEXT: v_and_b32_e32 v11, s7, v8
|
; SI-NEXT: v_fma_f32 v4, v10, s9, v4
|
||||||
; SI-NEXT: v_bfe_u32 v12, v9, 23, 8
|
; SI-NEXT: v_fma_f32 v9, v11, s9, v6
|
||||||
; SI-NEXT: v_and_b32_e32 v13, s7, v9
|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v0
|
||||||
; SI-NEXT: v_bfe_u32 v14, v10, 23, 8
|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v8
|
||||||
; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v2
|
; SI-NEXT: v_cvt_u32_f32_e32 v6, v4
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s18, v0
|
; SI-NEXT: v_cvt_u32_f32_e32 v4, v9
|
||||||
; SI-NEXT: v_sub_i32_e32 v5, vcc, s19, v2
|
; SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
||||||
; SI-NEXT: v_add_i32_e32 v15, vcc, s20, v2
|
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; SI-NEXT: v_add_i32_e32 v16, vcc, s6, v3
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v17, vcc, s19, v3
|
|
||||||
; SI-NEXT: v_add_i32_e32 v18, vcc, s20, v3
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v4
|
|
||||||
; SI-NEXT: v_lshr_b64 v[4:5], v[0:1], v5
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v15
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v19, v5, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v20, v4, v2, vcc
|
|
||||||
; SI-NEXT: v_add_i32_e32 v21, vcc, s6, v12
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v22, vcc, s19, v12
|
|
||||||
; SI-NEXT: v_add_i32_e32 v12, vcc, s20, v12
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s18, v11
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v16
|
|
||||||
; SI-NEXT: v_lshr_b64 v[4:5], v[0:1], v17
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s18, v13
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v18
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v11, v5, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v13, v4, v2, vcc
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v21
|
|
||||||
; SI-NEXT: v_lshr_b64 v[4:5], v[0:1], v22
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v12
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v5, v5, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
|
|
||||||
; SI-NEXT: v_and_b32_e32 v0, s7, v10
|
|
||||||
; SI-NEXT: s_lshl_b64 s[2:3], s[12:13], s5
|
|
||||||
; SI-NEXT: s_lshr_b64 s[4:5], s[12:13], s15
|
|
||||||
; SI-NEXT: s_or_b32 s12, s14, s18
|
|
||||||
; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v14
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v16, vcc, s19, v14
|
|
||||||
; SI-NEXT: v_add_i32_e32 v14, vcc, s20, v14
|
|
||||||
; SI-NEXT: v_or_b32_e32 v0, s18, v0
|
|
||||||
; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
|
|
||||||
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], v16
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v14
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v16, v0, v2, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v0, s5
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s3
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s21, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v2, s4
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s2
|
|
||||||
; SI-NEXT: s_lshl_b64 s[2:3], s[12:13], s16
|
|
||||||
; SI-NEXT: s_lshr_b64 s[4:5], s[12:13], s17
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s5
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v17, s3
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s22, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v17, s4
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v21, s2
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v17, v17, v21, vcc
|
|
||||||
; SI-NEXT: s_bfe_u32 s2, s11, 0x80017
|
|
||||||
; SI-NEXT: s_and_b32 s3, s11, s7
|
|
||||||
; SI-NEXT: s_add_i32 s4, s2, s6
|
|
||||||
; SI-NEXT: s_sub_i32 s5, s19, s2
|
|
||||||
; SI-NEXT: s_or_b32 s12, s3, s18
|
|
||||||
; SI-NEXT: s_lshl_b64 s[14:15], s[12:13], s4
|
|
||||||
; SI-NEXT: s_lshr_b64 s[16:17], s[12:13], s5
|
|
||||||
; SI-NEXT: s_add_i32 s24, s2, s20
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v21, s17
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v22, s15
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s24, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v21, v21, v22, vcc
|
|
||||||
; SI-NEXT: s_ashr_i32 s2, s9, 31
|
|
||||||
; SI-NEXT: s_ashr_i32 s3, s2, 31
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v0, s3, v0
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v2, s2, v2
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v22, s3
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v2, s[2:3], s2, v2
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v22, s[2:3], v0, v22, s[2:3]
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v0, 31, v6
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v6, v20, v0
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v20, 31, v0
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v19, v19, v20
|
|
||||||
; SI-NEXT: v_sub_i32_e64 v0, s[2:3], v6, v0
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v6, s[2:3], v19, v20, s[2:3]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 s[2:3], 0, v15
|
|
||||||
; SI-NEXT: s_ashr_i32 s4, s8, 31
|
|
||||||
; SI-NEXT: s_ashr_i32 s5, s4, 31
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v3, s5, v3
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v15, s4, v17
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v17, s5
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v15, s[4:5], s4, v15
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v17, s[4:5], v3, v17, s[4:5]
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v8
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, v13, v3
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v13, 31, v3
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v11, v11, v13
|
|
||||||
; SI-NEXT: v_sub_i32_e64 v3, s[4:5], v8, v3
|
|
||||||
; SI-NEXT: s_bfe_u32 s12, s10, 0x80017
|
|
||||||
; SI-NEXT: s_and_b32 s7, s10, s7
|
|
||||||
; SI-NEXT: s_add_i32 s15, s12, s6
|
|
||||||
; SI-NEXT: s_sub_i32 s23, s19, s12
|
|
||||||
; SI-NEXT: s_add_i32 s25, s12, s20
|
|
||||||
; SI-NEXT: s_or_b32 s12, s7, s18
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[18:19], s21, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v2, 0, s[18:19]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v8, s[4:5], v11, v13, s[4:5]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v18
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e64 s[6:7], s9, v7
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v2, v0, v2, s[6:7]
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[20:21], s22, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v15, 0, s[20:21]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[4:5]
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e64 s[8:9], s8, v7
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v0, v3, v0, s[8:9]
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s16
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v11, s14
|
|
||||||
; SI-NEXT: s_lshl_b64 s[16:17], s[12:13], s15
|
|
||||||
; SI-NEXT: s_lshr_b64 s[22:23], s[12:13], s23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v11, v3, v11, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v3, s23
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v13, s17
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 vcc, s25, 23
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v13, v3, v13, vcc
|
|
||||||
; SI-NEXT: s_load_dwordx2 s[12:13], s[0:1], 0x9
|
|
||||||
; SI-NEXT: s_mov_b32 s15, 0xf000
|
|
||||||
; SI-NEXT: s_mov_b32 s14, -1
|
|
||||||
; SI-NEXT: s_brev_b32 s17, 1
|
|
||||||
; SI-NEXT: s_ashr_i32 s0, s11, 31
|
|
||||||
; SI-NEXT: s_ashr_i32 s1, s10, 31
|
|
||||||
; SI-NEXT: s_ashr_i32 s23, s0, 31
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v9, 31, v9
|
|
||||||
; SI-NEXT: s_ashr_i32 s26, s1, 31
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v10, 31, v10
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v22, 0, s[18:19]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v6, v6, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v6, s17, v6
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v3, v6, v3, s[6:7]
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v6, s22
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v15, s16
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v6, v6, v15, vcc
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v15, s23
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v18, s23, v21
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v11, s0, v11
|
|
||||||
; SI-NEXT: v_subrev_i32_e32 v11, vcc, s0, v11
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v15, vcc, v18, v15, vcc
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v18, 31, v9
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v4, v4, v9
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, v5, v18
|
|
||||||
; SI-NEXT: v_sub_i32_e32 v4, vcc, v4, v9
|
|
||||||
; SI-NEXT: v_mov_b32_e32 v9, s26
|
|
||||||
; SI-NEXT: v_subb_u32_e32 v5, vcc, v5, v18, vcc
|
|
||||||
; SI-NEXT: v_ashrrev_i32_e32 v18, 31, v10
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v13, s26, v13
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v17, v17, 0, s[20:21]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v8, v8, 0, s[4:5]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v6, s1, v6
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v16, v16, v10
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v1, v1, v18
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v8, s17, v8
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[2:3], s24, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v11, v11, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v12
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
|
|
||||||
; SI-NEXT: v_subrev_i32_e64 v12, s[0:1], s1, v6
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v9, s[0:1], v13, v9, s[0:1]
|
|
||||||
; SI-NEXT: v_sub_i32_e64 v10, s[0:1], v16, v10
|
|
||||||
; SI-NEXT: v_subb_u32_e64 v13, s[0:1], v1, v18, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v15, v15, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v1, v8, v17, s[8:9]
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e32 vcc, s11, v7
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v6, v4, v11, vcc
|
|
||||||
; SI-NEXT: v_cmp_lt_i32_e64 s[2:3], s25, 0
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v12, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v14
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v8, v10, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, s17, v5
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v9, v9, 0, s[2:3]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v10, v13, 0, s[0:1]
|
|
||||||
; SI-NEXT: v_cmp_lt_f32_e64 s[0:1], s10, v7
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v4, v8, v4, s[0:1]
|
|
||||||
; SI-NEXT: v_cndmask_b32_e32 v7, v5, v15, vcc
|
|
||||||
; SI-NEXT: v_xor_b32_e32 v5, s17, v10
|
|
||||||
; SI-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[0:1]
|
|
||||||
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[4:7], off, s[12:15], 0 offset:16
|
|
||||||
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[12:15], 0
|
|
||||||
; SI-NEXT: s_endpgm
|
; SI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; VI-LABEL: fp_to_uint_v4f32_to_v4i64:
|
; VI-LABEL: fp_to_uint_v4f32_to_v4i64:
|
||||||
; VI: ; %bb.0:
|
; VI: ; %bb.0:
|
||||||
; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
|
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
|
||||||
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x34
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34
|
||||||
; VI-NEXT: s_mov_b32 s21, 0x7fffff
|
; VI-NEXT: s_mov_b32 s6, 0x2f800000
|
||||||
; VI-NEXT: s_movk_i32 s20, 0xff6a
|
; VI-NEXT: s_mov_b32 s7, 0xf000
|
||||||
; VI-NEXT: s_mov_b32 s22, 0x800000
|
|
||||||
; VI-NEXT: s_movk_i32 s23, 0x96
|
|
||||||
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
||||||
; VI-NEXT: s_bfe_u32 s14, s5, 0x80017
|
; VI-NEXT: v_trunc_f32_e32 v0, s1
|
||||||
; VI-NEXT: s_and_b32 s1, s5, s21
|
; VI-NEXT: v_trunc_f32_e32 v4, s0
|
||||||
; VI-NEXT: s_add_i32 s0, s14, s20
|
; VI-NEXT: v_mul_f32_e32 v1, s6, v0
|
||||||
; VI-NEXT: s_or_b32 s12, s1, s22
|
; VI-NEXT: v_mul_f32_e32 v2, s6, v4
|
||||||
; VI-NEXT: s_mov_b32 s13, 0
|
; VI-NEXT: v_floor_f32_e32 v5, v1
|
||||||
; VI-NEXT: s_sub_i32 s2, s23, s14
|
; VI-NEXT: s_mov_b32 s0, 0xcf800000
|
||||||
; VI-NEXT: s_movk_i32 s24, 0xff81
|
; VI-NEXT: v_floor_f32_e32 v6, v2
|
||||||
; VI-NEXT: s_lshl_b64 s[0:1], s[12:13], s0
|
; VI-NEXT: v_fma_f32 v0, v5, s0, v0
|
||||||
; VI-NEXT: s_lshr_b64 s[2:3], s[12:13], s2
|
; VI-NEXT: v_cvt_u32_f32_e32 v2, v0
|
||||||
; VI-NEXT: s_add_i32 s14, s14, s24
|
; VI-NEXT: v_fma_f32 v0, v6, s0, v4
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s14, 23
|
; VI-NEXT: v_trunc_f32_e32 v4, s3
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s3
|
; VI-NEXT: v_cvt_u32_f32_e32 v3, v5
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s1
|
; VI-NEXT: v_mul_f32_e32 v5, s6, v4
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
; VI-NEXT: v_trunc_f32_e32 v8, s2
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s0
|
; VI-NEXT: v_cvt_u32_f32_e32 v1, v6
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s2
|
; VI-NEXT: v_floor_f32_e32 v6, v5
|
||||||
; VI-NEXT: s_ashr_i32 s0, s5, 31
|
; VI-NEXT: v_mul_f32_e32 v5, s6, v8
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
; VI-NEXT: v_floor_f32_e32 v9, v5
|
||||||
; VI-NEXT: v_mov_b32_e32 v10, 0x5f000000
|
; VI-NEXT: v_fma_f32 v4, v6, s0, v4
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s0, v1
|
; VI-NEXT: v_cvt_u32_f32_e32 v7, v6
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
; VI-NEXT: v_cvt_u32_f32_e32 v6, v4
|
||||||
; VI-NEXT: v_sub_f32_e32 v8, s5, v10
|
; VI-NEXT: v_fma_f32 v4, v9, s0, v8
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s0, v1
|
; VI-NEXT: v_cvt_u32_f32_e32 v5, v9
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[14:15], s14, 0
|
; VI-NEXT: v_cvt_u32_f32_e32 v4, v4
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[14:15]
|
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
|
||||||
; VI-NEXT: v_and_b32_e32 v1, s21, v8
|
; VI-NEXT: s_mov_b32 s6, -1
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s1, v0
|
; VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
|
||||||
; VI-NEXT: v_mov_b32_e32 v2, s1
|
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
|
||||||
; VI-NEXT: v_bfe_u32 v9, v8, 23, 8
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v6, vcc, v0, v2, vcc
|
|
||||||
; VI-NEXT: v_add_u32_e32 v0, vcc, s20, v9
|
|
||||||
; VI-NEXT: v_or_b32_e32 v4, s22, v1
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v5, 0
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v2, vcc, s23, v9
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[2:3], v2, v[4:5]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v4, vcc, s24, v9
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v4
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v2, 31, v8
|
|
||||||
; VI-NEXT: s_and_b32 s12, s4, s21
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, v0, v2
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v2
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[2:3], s5, v10
|
|
||||||
; VI-NEXT: s_bfe_u32 s5, s4, 0x80017
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, v1, v3
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v4
|
|
||||||
; VI-NEXT: s_add_i32 s16, s5, s20
|
|
||||||
; VI-NEXT: s_or_b32 s12, s12, s22
|
|
||||||
; VI-NEXT: s_sub_i32 s18, s23, s5
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, s[0:1]
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v8, vcc, v1, v3, vcc
|
|
||||||
; VI-NEXT: s_lshl_b64 s[16:17], s[12:13], s16
|
|
||||||
; VI-NEXT: s_lshr_b64 s[18:19], s[12:13], s18
|
|
||||||
; VI-NEXT: s_add_i32 s5, s5, s24
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v2, v0, v7, s[2:3]
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v0, s19
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s17
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s5, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v1, s18
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v3, s16
|
|
||||||
; VI-NEXT: s_ashr_i32 s12, s4, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s16, s12, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s12, v1
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v11, s4, v10
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, s16, v0
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v3, s16
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v1, vcc, s12, v1
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[16:17], s5, 0
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v9, v1, 0, s[16:17]
|
|
||||||
; VI-NEXT: v_and_b32_e32 v1, s21, v11
|
|
||||||
; VI-NEXT: v_bfe_u32 v12, v11, 23, 8
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v7, vcc, v0, v3, vcc
|
|
||||||
; VI-NEXT: v_add_u32_e32 v0, vcc, s20, v12
|
|
||||||
; VI-NEXT: v_or_b32_e32 v4, s22, v1
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v3, vcc, s23, v12
|
|
||||||
; VI-NEXT: v_add_u32_e32 v12, vcc, s24, v12
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[0:1], v0, v[4:5]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[3:4], v3, v[4:5]
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v12
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v11
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v0, v0, v3
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v4, 31, v3
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v3
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, v1, v4
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v4, vcc
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v12
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v8, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_brev_b32 s18, 1
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v6, 0, s[14:15]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s18, v4
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[2:3]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[4:5], s4, v10
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v7, 0, s[16:17]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v1, s18, v1
|
|
||||||
; VI-NEXT: s_and_b32 s1, s7, s21
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[4:5]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v1, v1, v4, s[4:5]
|
|
||||||
; VI-NEXT: s_bfe_u32 s4, s7, 0x80017
|
|
||||||
; VI-NEXT: s_add_i32 s0, s4, s20
|
|
||||||
; VI-NEXT: s_or_b32 s12, s1, s22
|
|
||||||
; VI-NEXT: s_sub_i32 s2, s23, s4
|
|
||||||
; VI-NEXT: s_lshl_b64 s[0:1], s[12:13], s0
|
|
||||||
; VI-NEXT: s_add_i32 s4, s4, s24
|
|
||||||
; VI-NEXT: s_lshr_b64 s[2:3], s[12:13], s2
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s3
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v6, s1
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s4, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v6, s2
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v7, s0
|
|
||||||
; VI-NEXT: s_ashr_i32 s0, s7, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s1, s0, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v6, s0, v6
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v13, s7, v10
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s1, v4
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v7, s1
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v6, vcc, s0, v6
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v11, vcc, v4, v7, vcc
|
|
||||||
; VI-NEXT: v_and_b32_e32 v4, s21, v13
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[14:15], s4, 0
|
|
||||||
; VI-NEXT: v_bfe_u32 v14, v13, 23, 8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v12, v6, 0, s[14:15]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v6, vcc, s20, v14
|
|
||||||
; VI-NEXT: v_or_b32_e32 v4, s22, v4
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v8, vcc, s23, v14
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[6:7], v6, v[4:5]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[8:9], v8, v[4:5]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v4, vcc, s24, v14
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v4
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v8, 31, v13
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v6, v6, v8
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v9, 31, v8
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[2:3], s7, v10
|
|
||||||
; VI-NEXT: s_bfe_u32 s7, s6, 0x80017
|
|
||||||
; VI-NEXT: s_and_b32 s5, s6, s21
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v7, v7, v9
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v6, vcc, v6, v8
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 s[0:1], 0, v4
|
|
||||||
; VI-NEXT: s_add_i32 s4, s7, s20
|
|
||||||
; VI-NEXT: s_or_b32 s12, s5, s22
|
|
||||||
; VI-NEXT: s_sub_i32 s16, s23, s7
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[0:1]
|
|
||||||
; VI-NEXT: s_lshl_b64 s[4:5], s[12:13], s4
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v9, vcc, v7, v9, vcc
|
|
||||||
; VI-NEXT: s_add_i32 s7, s7, s24
|
|
||||||
; VI-NEXT: s_lshr_b64 s[12:13], s[12:13], s16
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v6, v4, v12, s[2:3]
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v4, s13
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v7, s5
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e64 vcc, s7, 23
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v7, s12
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v8, s4
|
|
||||||
; VI-NEXT: s_ashr_i32 s4, s6, 31
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
|
|
||||||
; VI-NEXT: s_ashr_i32 s5, s4, 31
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v7, s4, v7
|
|
||||||
; VI-NEXT: v_sub_f32_e32 v14, s6, v10
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, s5, v4
|
|
||||||
; VI-NEXT: v_mov_b32_e32 v8, s5
|
|
||||||
; VI-NEXT: v_subrev_u32_e32 v7, vcc, s4, v7
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v12, vcc, v4, v8, vcc
|
|
||||||
; VI-NEXT: v_and_b32_e32 v4, s21, v14
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e64 s[12:13], s7, 0
|
|
||||||
; VI-NEXT: v_bfe_u32 v15, v14, 23, 8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v13, v7, 0, s[12:13]
|
|
||||||
; VI-NEXT: v_add_u32_e32 v7, vcc, s20, v15
|
|
||||||
; VI-NEXT: v_or_b32_e32 v4, s22, v4
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v16, vcc, s23, v15
|
|
||||||
; VI-NEXT: v_add_u32_e32 v15, vcc, s24, v15
|
|
||||||
; VI-NEXT: v_lshlrev_b64 v[7:8], v7, v[4:5]
|
|
||||||
; VI-NEXT: v_lshrrev_b64 v[4:5], v16, v[4:5]
|
|
||||||
; VI-NEXT: v_cmp_lt_i32_e32 vcc, 23, v15
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v4, v4, v7, vcc
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v7, 31, v14
|
|
||||||
; VI-NEXT: v_cndmask_b32_e32 v5, v5, v8, vcc
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v4, v4, v7
|
|
||||||
; VI-NEXT: v_ashrrev_i32_e32 v8, 31, v7
|
|
||||||
; VI-NEXT: v_sub_u32_e32 v4, vcc, v4, v7
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v5, v5, v8
|
|
||||||
; VI-NEXT: v_subb_u32_e32 v5, vcc, v5, v8, vcc
|
|
||||||
; VI-NEXT: v_cmp_gt_i32_e32 vcc, 0, v15
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v8, v9, 0, s[0:1]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v7, v11, 0, s[14:15]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v8, s18, v8
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[2:3]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
|
|
||||||
; VI-NEXT: v_cmp_lt_f32_e64 s[4:5], s6, v10
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v8, v12, 0, s[12:13]
|
|
||||||
; VI-NEXT: v_xor_b32_e32 v5, s18, v5
|
|
||||||
; VI-NEXT: s_mov_b32 s11, 0xf000
|
|
||||||
; VI-NEXT: s_mov_b32 s10, -1
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v4, v4, v13, s[4:5]
|
|
||||||
; VI-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[4:5]
|
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[4:7], off, s[8:11], 0 offset:16
|
|
||||||
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
|
|
||||||
; VI-NEXT: s_endpgm
|
; VI-NEXT: s_endpgm
|
||||||
;
|
;
|
||||||
; EG-LABEL: fp_to_uint_v4f32_to_v4i64:
|
; EG-LABEL: fp_to_uint_v4f32_to_v4i64:
|
||||||
|
Loading…
Reference in New Issue
Block a user