//===-- SOPInstructions.td - SOP Instruction Definitions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

def GPRIdxModeMatchClass : AsmOperandClass {
  let Name = "GPRIdxMode";
  let PredicateMethod = "isGPRIdxMode";
  let ParserMethod = "parseGPRIdxMode";
  let RenderMethod = "addImmOperands";
}

def GPRIdxMode : Operand<i32> {
  let PrintMethod = "printVGPRIndexMode";
  let ParserMatchClass = GPRIdxModeMatchClass;
  let OperandType = "OPERAND_IMMEDIATE";
}

class SOP_Pseudo<string opName, dag outs, dag ins, string asmOps,
                 list<dag> pattern=[]> :
    InstSI<outs, ins, "", pattern>,
    SIMCInstr<opName, SIEncodingFamily.NONE> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;

  string Mnemonic = opName;
  string AsmOperands = asmOps;

  bits<1> has_sdst = 0;
}

//===----------------------------------------------------------------------===//
// SOP1 Instructions
//===----------------------------------------------------------------------===//

class SOP1_Pseudo <string opName, dag outs, dag ins,
                   string asmOps, list<dag> pattern=[]> :
  SOP_Pseudo<opName, outs, ins, asmOps, pattern> {

  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SALU = 1;
  let SOP1 = 1;
  let SchedRW = [WriteSALU];
  let Size = 4;
  let UseNamedOperandTable = 1;

  bits<1> has_src0 = 1;
  bits<1> has_sdst = 1;
}

class SOP1_Real<bits<8> op, SOP1_Pseudo ps, string real_name = ps.Mnemonic> :
  InstSI <ps.OutOperandList, ps.InOperandList,
          real_name # " " # ps.AsmOperands, []>,
  Enc32 {

  let SALU = 1;
  let SOP1 = 1;
  let isPseudo = 0;
  let isCodeGenOnly = 0;
  let Size = 4;

  // copy relevant pseudo op flags
  let SubtargetPredicate = ps.SubtargetPredicate;
  let AsmMatchConverter  = ps.AsmMatchConverter;
  let SchedRW            = ps.SchedRW;
  let mayLoad            = ps.mayLoad;
  let mayStore           = ps.mayStore;

  // encoding
  bits<7> sdst;
  bits<8> src0;

  let Inst{7-0}   = !if(ps.has_src0, src0, ?);
  let Inst{15-8}  = op;
  let Inst{22-16} = !if(ps.has_sdst, sdst, ?);
  let Inst{31-23} = 0x17d; // encoding
}

class SOP1_32 <string opName, list<dag> pattern=[], bit tied_in = 0> : SOP1_Pseudo <
  opName, (outs SReg_32:$sdst),
  !if(tied_in, (ins SSrc_b32:$src0, SReg_32:$sdst_in),
               (ins SSrc_b32:$src0)),
  "$sdst, $src0", pattern> {
  let Constraints = !if(tied_in, "$sdst = $sdst_in", "");
}

// Only register input allowed.
class SOP1_32R <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs SReg_32:$sdst), (ins SReg_32:$src0),
  "$sdst, $src0", pattern>;

// 32-bit input, no output.
class SOP1_0_32 <string opName, list<dag> pattern = []> : SOP1_Pseudo <
  opName, (outs), (ins SSrc_b32:$src0), "$src0", pattern> {
  let has_sdst = 0;
}

// Special case for movreld where sdst is treated as a use operand.
class SOP1_32_movreld <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs), (ins SReg_32:$sdst, SSrc_b32:$src0),
  "$sdst, $src0", pattern>;

// Special case for movreld where sdst is treated as a use operand.
class SOP1_64_movreld <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs), (ins SReg_64:$sdst, SSrc_b64:$src0),
  "$sdst, $src0", pattern
>;

class SOP1_0_32R <string opName, list<dag> pattern = []> : SOP1_Pseudo <
  opName, (outs), (ins SReg_32:$src0), "$src0", pattern> {
  let has_sdst = 0;
}

class SOP1_64 <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs SReg_64:$sdst), (ins SSrc_b64:$src0),
  "$sdst, $src0", pattern
>;

// Only register input allowed.
class SOP1_64R <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs SReg_64:$sdst), (ins SReg_64:$src0),
  "$sdst, $src0", pattern
>;

// 64-bit input, 32-bit output.
class SOP1_32_64 <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs SReg_32:$sdst), (ins SSrc_b64:$src0),
  "$sdst, $src0", pattern
>;
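// Illustrative sketch only (hypothetical mnemonic and opcode value): a pseudo
// built from the helper classes above is later paired with a SOP1_Real that
// supplies the encoding, roughly:
//
//   def S_EXAMPLE_B32    : SOP1_32 <"s_example_b32">;
//   def S_EXAMPLE_B32_vi : SOP1_Real<0x00 /*subtarget-specific op*/, S_EXAMPLE_B32>;
//
// SOP1_Real copies the pseudo's operand lists and flags, then fills in the
// Enc32 layout: src0 in bits [7:0], op in [15:8], sdst in [22:16], and the
// fixed SOP1 encoding tag 0x17d in [31:23].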
// 32-bit input, 64-bit output.
class SOP1_64_32 <string opName, list<dag> pattern=[], bit tied_in = 0> : SOP1_Pseudo <
  opName, (outs SReg_64:$sdst),
  !if(tied_in, (ins SSrc_b32:$src0, SReg_64:$sdst_in),
               (ins SSrc_b32:$src0)),
  "$sdst, $src0", pattern> {
  let Constraints = !if(tied_in, "$sdst = $sdst_in", "");
}

// no input, 64-bit output.
class SOP1_64_0 <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs SReg_64:$sdst), (ins), "$sdst", pattern> {
  let has_src0 = 0;
}

// 64-bit input, no output
class SOP1_1 <string opName, RegisterClass rc = SReg_64, list<dag> pattern=[]> : SOP1_Pseudo <
  opName, (outs), (ins rc:$src0), "$src0", pattern> {
  let has_sdst = 0;
}

let isMoveImm = 1 in {
  let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
    def S_MOV_B32 : SOP1_32 <"s_mov_b32">;
    def S_MOV_B64 : SOP1_64 <"s_mov_b64">;
  } // End isReMaterializable = 1

  let Uses = [SCC] in {
    def S_CMOV_B32 : SOP1_32 <"s_cmov_b32">;
    def S_CMOV_B64 : SOP1_64 <"s_cmov_b64">;
  } // End Uses = [SCC]
} // End isMoveImm = 1

let Defs = [SCC] in {
  def S_NOT_B32 : SOP1_32 <"s_not_b32",
    [(set i32:$sdst, (not i32:$src0))]
  >;

  def S_NOT_B64 : SOP1_64 <"s_not_b64",
    [(set i64:$sdst, (not i64:$src0))]
  >;

  def S_WQM_B32 : SOP1_32 <"s_wqm_b32">;
  def S_WQM_B64 : SOP1_64 <"s_wqm_b64">;
} // End Defs = [SCC]

let WaveSizePredicate = isWave32 in {
def : GCNPat <
  (int_amdgcn_wqm_vote i1:$src0),
  (S_WQM_B32 SSrc_b32:$src0)
>;
}

let WaveSizePredicate = isWave64 in {
def : GCNPat <
  (int_amdgcn_wqm_vote i1:$src0),
  (S_WQM_B64 SSrc_b64:$src0)
>;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def S_BREV_B32 : SOP1_32 <"s_brev_b32",
  [(set i32:$sdst, (bitreverse i32:$src0))]
>;

def S_BREV_B64 : SOP1_64 <"s_brev_b64",
  [(set i64:$sdst, (bitreverse i64:$src0))]
>;
} // End isReMaterializable = 1, isAsCheapAsAMove = 1

let Defs = [SCC] in {
def S_BCNT0_I32_B32 : SOP1_32 <"s_bcnt0_i32_b32">;
def S_BCNT0_I32_B64 : SOP1_32_64 <"s_bcnt0_i32_b64">;
def S_BCNT1_I32_B32 : SOP1_32 <"s_bcnt1_i32_b32",
  [(set i32:$sdst, (ctpop i32:$src0))]
>;
def S_BCNT1_I32_B64 : SOP1_32_64 <"s_bcnt1_i32_b64",
  [(set i32:$sdst, (ctpop i64:$src0))]
>;
} // End Defs = [SCC]

let isReMaterializable = 1 in {
def S_FF0_I32_B32 : SOP1_32 <"s_ff0_i32_b32">;
def S_FF0_I32_B64 : SOP1_32_64 <"s_ff0_i32_b64">;
def S_FF1_I32_B64 : SOP1_32_64 <"s_ff1_i32_b64",
  [(set i32:$sdst, (AMDGPUffbl_b32 i64:$src0))]
>;
def S_FF1_I32_B32 : SOP1_32 <"s_ff1_i32_b32",
  [(set i32:$sdst, (AMDGPUffbl_b32 i32:$src0))]
>;

def S_FLBIT_I32_B32 : SOP1_32 <"s_flbit_i32_b32",
  [(set i32:$sdst, (AMDGPUffbh_u32 i32:$src0))]
>;

def S_FLBIT_I32_B64 : SOP1_32_64 <"s_flbit_i32_b64",
  [(set i32:$sdst, (AMDGPUffbh_u32 i64:$src0))]
>;
def S_FLBIT_I32 : SOP1_32 <"s_flbit_i32",
  [(set i32:$sdst, (AMDGPUffbh_i32 i32:$src0))]
>;
def S_FLBIT_I32_I64 : SOP1_32_64 <"s_flbit_i32_i64">;
def S_SEXT_I32_I8 : SOP1_32 <"s_sext_i32_i8",
  [(set i32:$sdst, (sext_inreg i32:$src0, i8))]
>;
def S_SEXT_I32_I16 : SOP1_32 <"s_sext_i32_i16",
  [(set i32:$sdst, (sext_inreg i32:$src0, i16))]
>;
} // End isReMaterializable = 1

def S_BITSET0_B32 : SOP1_32    <"s_bitset0_b32", [], 1>;
def S_BITSET0_B64 : SOP1_64_32 <"s_bitset0_b64", [], 1>;
def S_BITSET1_B32 : SOP1_32    <"s_bitset1_b32", [], 1>;
def S_BITSET1_B64 : SOP1_64_32 <"s_bitset1_b64", [], 1>;

def S_GETPC_B64 : SOP1_64_0 <"s_getpc_b64",
  [(set i64:$sdst, (int_amdgcn_s_getpc))]
>;

let isTerminator = 1, isBarrier = 1, SchedRW = [WriteBranch] in {

let isBranch = 1, isIndirectBranch = 1 in {
def S_SETPC_B64 : SOP1_1 <"s_setpc_b64">;
} // End isBranch = 1, isIndirectBranch = 1

let isReturn = 1 in {
// Define variant marked as return rather than branch.
def S_SETPC_B64_return : SOP1_1<"", CCR_SGPR_64, [(AMDGPUret_flag i64:$src0)]>;
}
} // End isTerminator = 1, isBarrier = 1

let isCall = 1 in {
def S_SWAPPC_B64 : SOP1_64 <"s_swappc_b64">;
}

def S_RFE_B64 : SOP1_1 <"s_rfe_b64">;

let hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC] in {

def S_AND_SAVEEXEC_B64   : SOP1_64 <"s_and_saveexec_b64">;
def S_OR_SAVEEXEC_B64    : SOP1_64 <"s_or_saveexec_b64">;
def S_XOR_SAVEEXEC_B64   : SOP1_64 <"s_xor_saveexec_b64">;
def S_ANDN2_SAVEEXEC_B64 : SOP1_64 <"s_andn2_saveexec_b64">;
def S_ORN2_SAVEEXEC_B64  : SOP1_64 <"s_orn2_saveexec_b64">;
def S_NAND_SAVEEXEC_B64  : SOP1_64 <"s_nand_saveexec_b64">;
def S_NOR_SAVEEXEC_B64   : SOP1_64 <"s_nor_saveexec_b64">;
def S_XNOR_SAVEEXEC_B64  : SOP1_64 <"s_xnor_saveexec_b64">;

} // End hasSideEffects = 1, Uses = [EXEC], Defs = [EXEC, SCC]

def S_QUADMASK_B32 : SOP1_32 <"s_quadmask_b32">;
def S_QUADMASK_B64 : SOP1_64 <"s_quadmask_b64">;

let Uses = [M0] in {
def S_MOVRELS_B32 : SOP1_32R <"s_movrels_b32">;
def S_MOVRELS_B64 : SOP1_64R <"s_movrels_b64">;
def S_MOVRELD_B32 : SOP1_32_movreld <"s_movreld_b32">;
def S_MOVRELD_B64 : SOP1_64_movreld <"s_movreld_b64">;
} // End Uses = [M0]

let SubtargetPredicate = isGFX6GFX7GFX8GFX9 in {
def S_CBRANCH_JOIN : SOP1_0_32R <"s_cbranch_join">;
} // End SubtargetPredicate = isGFX6GFX7GFX8GFX9

let Defs = [SCC] in {
def S_ABS_I32 : SOP1_32 <"s_abs_i32",
  [(set i32:$sdst, (abs i32:$src0))]
>;
} // End Defs = [SCC]

let SubtargetPredicate = HasVGPRIndexMode in {
def S_SET_GPR_IDX_IDX : SOP1_0_32<"s_set_gpr_idx_idx"> {
  let Uses = [M0, MODE];
  let Defs = [M0, MODE];
}
}

let SubtargetPredicate = isGFX9Plus in {
  let hasSideEffects = 1, Defs = [EXEC, SCC], Uses = [EXEC] in {
    def S_ANDN1_SAVEEXEC_B64 : SOP1_64<"s_andn1_saveexec_b64">;
    def S_ORN1_SAVEEXEC_B64  : SOP1_64<"s_orn1_saveexec_b64">;
    def S_ANDN1_WREXEC_B64   : SOP1_64<"s_andn1_wrexec_b64">;
    def S_ANDN2_WREXEC_B64   : SOP1_64<"s_andn2_wrexec_b64">;
  } // End hasSideEffects = 1, Defs = [EXEC, SCC], Uses = [EXEC]

  let isReMaterializable = 1 in
  def S_BITREPLICATE_B64_B32 : SOP1_64_32<"s_bitreplicate_b64_b32">;
} // End SubtargetPredicate = isGFX9Plus

let SubtargetPredicate = isGFX10Plus in {
  let hasSideEffects = 1, Defs = [EXEC, SCC], Uses = [EXEC] in {
    def S_AND_SAVEEXEC_B32   : SOP1_32<"s_and_saveexec_b32">;
    def S_OR_SAVEEXEC_B32    : SOP1_32<"s_or_saveexec_b32">;
    def S_XOR_SAVEEXEC_B32   : SOP1_32<"s_xor_saveexec_b32">;
    def S_ANDN2_SAVEEXEC_B32 : SOP1_32<"s_andn2_saveexec_b32">;
    def S_ORN2_SAVEEXEC_B32  : SOP1_32<"s_orn2_saveexec_b32">;
    def S_NAND_SAVEEXEC_B32  : SOP1_32<"s_nand_saveexec_b32">;
    def S_NOR_SAVEEXEC_B32   : SOP1_32<"s_nor_saveexec_b32">;
    def S_XNOR_SAVEEXEC_B32  : SOP1_32<"s_xnor_saveexec_b32">;
    def S_ANDN1_SAVEEXEC_B32 : SOP1_32<"s_andn1_saveexec_b32">;
    def S_ORN1_SAVEEXEC_B32  : SOP1_32<"s_orn1_saveexec_b32">;
    def S_ANDN1_WREXEC_B32   : SOP1_32<"s_andn1_wrexec_b32">;
    def S_ANDN2_WREXEC_B32   : SOP1_32<"s_andn2_wrexec_b32">;
  } // End hasSideEffects = 1, Defs = [EXEC, SCC], Uses = [EXEC]

  let Uses = [M0] in {
    def S_MOVRELSD_2_B32 : SOP1_32<"s_movrelsd_2_b32">;
  } // End Uses = [M0]
} // End SubtargetPredicate = isGFX10Plus

//===----------------------------------------------------------------------===//
// SOP2 Instructions
//===----------------------------------------------------------------------===//

class SOP2_Pseudo<string opName, dag outs, dag ins,
                  string asmOps, list<dag> pattern=[]> :
  SOP_Pseudo<opName, outs, ins, asmOps, pattern> {

  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let SALU = 1;
  let SOP2 = 1;
  let SchedRW = [WriteSALU];
  let UseNamedOperandTable = 1;

  let has_sdst = 1;

  // Pseudo instructions have no encodings, but adding this field here allows
  // us to do:
  // let sdst = xxx in {
  // for multiclasses that include both real and pseudo instructions.
  // field bits<7> sdst = 0;
  // let Size = 4; // Do we need size here?
}

class SOP2_Real<bits<7> op, SOP_Pseudo ps, string real_name = ps.Mnemonic> :
  InstSI <ps.OutOperandList, ps.InOperandList,
          real_name # " " # ps.AsmOperands, []>,
  Enc32 {
  let SALU = 1;
  let SOP2 = 1;
  let isPseudo = 0;
  let isCodeGenOnly = 0;

  // copy relevant pseudo op flags
  let SubtargetPredicate   = ps.SubtargetPredicate;
  let AsmMatchConverter    = ps.AsmMatchConverter;
  let UseNamedOperandTable = ps.UseNamedOperandTable;
  let TSFlags              = ps.TSFlags;
  let SchedRW              = ps.SchedRW;
  let mayLoad              = ps.mayLoad;
  let mayStore             = ps.mayStore;

  // encoding
  bits<7> sdst;
  bits<8> src0;
  bits<8> src1;

  let Inst{7-0}   = src0;
  let Inst{15-8}  = src1;
  let Inst{22-16} = !if(ps.has_sdst, sdst, ?);
  let Inst{29-23} = op;
  let Inst{31-30} = 0x2; // encoding
}

class SOP2_32 <string opName, list<dag> pattern=[]> : SOP2_Pseudo <
  opName, (outs SReg_32:$sdst), (ins SSrc_b32:$src0, SSrc_b32:$src1),
  "$sdst, $src0, $src1", pattern
>;

class SOP2_64 <string opName, list<dag> pattern=[]> : SOP2_Pseudo <
  opName, (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1),
  "$sdst, $src0, $src1", pattern
>;

class SOP2_64_32 <string opName, list<dag> pattern=[]> : SOP2_Pseudo <
  opName, (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b32:$src1),
  "$sdst, $src0, $src1", pattern
>;

class SOP2_64_32_32 <string opName, list<dag> pattern=[]> : SOP2_Pseudo <
  opName, (outs SReg_64:$sdst), (ins SSrc_b32:$src0, SSrc_b32:$src1),
  "$sdst, $src0, $src1", pattern
>;

class UniformUnaryFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0),
  (Op $src0),
  [{ return !N->isDivergent(); }]> {
  // This check is unnecessary as it's captured by the result register
  // bank constraint.
  //
  // FIXME: Should add a way for the emitter to recognize this is a
  // trivially true predicate to eliminate the check.
  let GISelPredicateCode = [{return true;}];
}

class UniformBinFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0, node:$src1),
  (Op $src0, $src1),
  [{ return !N->isDivergent(); }]> {
  // This check is unnecessary as it's captured by the result register
  // bank constraint.
  //
  // FIXME: Should add a way for the emitter to recognize this is a
  // trivially true predicate to eliminate the check.
  let GISelPredicateCode = [{return true;}];
}
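// Usage note: wrapping an operator in UniformBinFrag restricts a pattern to
// non-divergent DAG nodes, which is how uniform operations are steered to
// these scalar (SALU) instructions; for example, S_ADD_I32 below matches
// UniformBinFrag<add>. DivergentBinFrag (next) is the complement, intended
// for selecting the vector (VALU) forms of the same operators.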
class DivergentBinFrag<SDPatternOperator Op> : PatFrag <
  (ops node:$src0, node:$src1),
  (Op $src0, $src1),
  [{ return N->isDivergent(); }]> {
  // This check is unnecessary as it's captured by the result register
  // bank constraint.
  //
  // FIXME: Should add a way for the emitter to recognize this is a
  // trivially true predicate to eliminate the check.
  let GISelPredicateCode = [{return true;}];
}

let Defs = [SCC] in { // Carry out goes to SCC
let isCommutable = 1 in {
def S_ADD_U32 : SOP2_32 <"s_add_u32">;
def S_ADD_I32 : SOP2_32 <"s_add_i32",
  [(set i32:$sdst, (UniformBinFrag<add> SSrc_b32:$src0, SSrc_b32:$src1))]
>;
} // End isCommutable = 1

def S_SUB_U32 : SOP2_32 <"s_sub_u32">;
def S_SUB_I32 : SOP2_32 <"s_sub_i32",
  [(set i32:$sdst, (UniformBinFrag<sub> SSrc_b32:$src0, SSrc_b32:$src1))]
>;

let Uses = [SCC] in { // Carry in comes from SCC
let isCommutable = 1 in {
def S_ADDC_U32 : SOP2_32 <"s_addc_u32",
  [(set i32:$sdst, (UniformBinFrag<adde> (i32 SSrc_b32:$src0), (i32 SSrc_b32:$src1)))]>;
} // End isCommutable = 1

def S_SUBB_U32 : SOP2_32 <"s_subb_u32",
  [(set i32:$sdst, (UniformBinFrag<sube> (i32 SSrc_b32:$src0), (i32 SSrc_b32:$src1)))]>;
} // End Uses = [SCC]

let isCommutable = 1 in {
def S_MIN_I32 : SOP2_32 <"s_min_i32",
  [(set i32:$sdst, (smin i32:$src0, i32:$src1))]
>;
def S_MIN_U32 : SOP2_32 <"s_min_u32",
  [(set i32:$sdst, (umin i32:$src0, i32:$src1))]
>;
def S_MAX_I32 : SOP2_32 <"s_max_i32",
  [(set i32:$sdst, (smax i32:$src0, i32:$src1))]
>;
def S_MAX_U32 : SOP2_32 <"s_max_u32",
  [(set i32:$sdst, (umax i32:$src0, i32:$src1))]
>;
} // End isCommutable = 1
} // End Defs = [SCC]

// This pattern is restricted to certain subtargets (practically GFX8Plus)
// because isel sometimes produces an sreg_64 copy to SCC as a by-product of
// this pattern, and only for subtargets with hasScalarCompareEq64 is it
// possible to map such a copy to a single instruction (S_CMP_LG_U64).
class SelectPat<SDPatternOperator select> : PatFrag <
  (ops node:$src1, node:$src2),
  (select SCC, $src1, $src2),
  [{ return Subtarget->hasScalarCompareEq64() &&
            N->getOperand(0)->hasOneUse() && !N->isDivergent(); }]
>;

let Uses = [SCC] in {
  let AddedComplexity = 20 in {
    def S_CSELECT_B32 : SOP2_32 <"s_cselect_b32",
      [(set i32:$sdst, (SelectPat