[AMDGPU] Refine -O0 and -O1 passes.
Differential Revision: https://reviews.llvm.org/D105579
commit 5c27957af8
parent 5aac6f60bc
@@ -1105,7 +1105,8 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
       EnableLowerKernelArguments)
     addPass(createAMDGPULowerKernelArgumentsPass());
 
-  addPass(&AMDGPUPerfHintAnalysisID);
+  if (TM->getOptLevel() > CodeGenOpt::Less)
+    addPass(&AMDGPUPerfHintAnalysisID);
 
   TargetPassConfig::addCodeGenPrepare();
 
@@ -1120,7 +1121,8 @@ void AMDGPUPassConfig::addCodeGenPrepare() {
 }
 
 bool AMDGPUPassConfig::addPreISel() {
-  addPass(createFlattenCFGPass());
+  if (TM->getOptLevel() > CodeGenOpt::None)
+    addPass(createFlattenCFGPass());
   return false;
 }
 
@@ -1193,14 +1195,13 @@ bool GCNPassConfig::addPreISel() {
   if (TM->getOptLevel() > CodeGenOpt::None)
     addPass(createAMDGPULateCodeGenPreparePass());
 
-  if (EnableAtomicOptimizations) {
+  if (isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less)) {
     addPass(createAMDGPUAtomicOptimizerPass());
   }
 
-  // FIXME: We need to run a pass to propagate the attributes when calls are
-  // supported.
+  if (TM->getOptLevel() > CodeGenOpt::None)
+    addPass(createSinkingPass());
 
-  addPass(createSinkingPass());
   // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
   // regions formed by them.
   addPass(&AMDGPUUnifyDivergentExitNodesID);
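Note on the new guard above: isPassEnabled is a helper on the AMDGPU pass config whose definition is not shown in this hunk. Below is a minimal standalone C++ sketch of the semantics it appears to encode (an explicit command-line setting wins, otherwise the pass is enabled only at or above the given optimization level); the names and exact behavior here are assumptions for illustration, not the patch's actual definition.

// Standalone illustration only (assumed semantics); in LLVM the real helper
// would operate on cl::opt<bool> and CodeGenOpt::Level inside AMDGPUPassConfig.
#include <optional>

enum class OptLevel { None = 0, Less = 1, Default = 2, Aggressive = 3 };

// Returns true if the pass should run: an explicit command-line setting wins;
// otherwise the pass runs only when the current level reaches MinLevel, in
// which case the option's default value decides.
static bool isPassEnabled(std::optional<bool> ExplicitSetting, bool DefaultValue,
                          OptLevel Current, OptLevel MinLevel = OptLevel::Default) {
  if (ExplicitSetting)
    return *ExplicitSetting;
  if (Current < MinLevel)
    return false;
  return DefaultValue;
}

Under these assumed semantics, isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less) would force the atomic optimizer off at -O0 and defer to the option's default from -O1 upward, unless the flag is passed explicitly.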
@@ -1443,7 +1444,10 @@ void GCNPassConfig::addPreSched2() {
 void GCNPassConfig::addPreEmitPass() {
   addPass(createSIMemoryLegalizerPass());
   addPass(createSIInsertWaitcntsPass());
-  addPass(createSIShrinkInstructionsPass());
+
+  if (TM->getOptLevel() > CodeGenOpt::None)
+    addPass(createSIShrinkInstructionsPass());
+
   addPass(createSIModeRegisterPass());
 
   if (getOptLevel() > CodeGenOpt::None)
@@ -19,7 +19,7 @@
 
 ; Spill load
 ; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], 0 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
-; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], s{{[0-9]+}}, v0
+; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0, s{{[0-9]+}}
 
 ; Spill saved exec
 ; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
@@ -88,7 +88,7 @@ endif:
 ; GCN-DAG: s_mov_b32 m0, -1
 ; GCN-DAG: v_mov_b32_e32 [[PTR0:v[0-9]+]], 0{{$}}
 ; GCN: ds_read_b32 [[LOAD0:v[0-9]+]], [[PTR0]]
-; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, v0
+; GCN: v_cmp_eq_u32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v0, s{{[0-9]+}}
 
 ; Spill load
 ; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], 0 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
@@ -166,7 +166,7 @@ end:
 ; GCN: buffer_store_dword [[LOAD0]], off, s[0:3], 0 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
 
 ; GCN: s_mov_b32 [[ZERO:s[0-9]+]], 0
-; GCN: v_cmp_ne_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], [[ZERO]], v0
+; GCN: v_cmp_ne_u32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0, [[ZERO]]
 
 ; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec
 ; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
@@ -54,22 +54,13 @@
 ; GCN-O0-NEXT: AMDGPU Annotate Kernel Features
 ; GCN-O0-NEXT: FunctionPass Manager
 ; GCN-O0-NEXT: AMDGPU Lower Kernel Arguments
-; GCN-O0-NEXT: Analysis if a function is memory bound
-; GCN-O0-NEXT: FunctionPass Manager
 ; GCN-O0-NEXT: Lazy Value Information Analysis
 ; GCN-O0-NEXT: Lower SwitchInst's to branches
 ; GCN-O0-NEXT: Lower invoke and unwind, for unwindless code generators
 ; GCN-O0-NEXT: Remove unreachable blocks from the CFG
-; GCN-O0-NEXT: Dominator Tree Construction
-; GCN-O0-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O0-NEXT: Function Alias Analysis Results
-; GCN-O0-NEXT: Flatten the CFG
-; GCN-O0-NEXT: Dominator Tree Construction
-; GCN-O0-NEXT: Basic Alias Analysis (stateless AA impl)
-; GCN-O0-NEXT: Function Alias Analysis Results
-; GCN-O0-NEXT: Natural Loop Information
-; GCN-O0-NEXT: Code sinking
 ; GCN-O0-NEXT: Post-Dominator Tree Construction
+; GCN-O0-NEXT: Dominator Tree Construction
+; GCN-O0-NEXT: Natural Loop Information
 ; GCN-O0-NEXT: Legacy Divergence Analysis
 ; GCN-O0-NEXT: Unify divergent function exit nodes
 ; GCN-O0-NEXT: Lazy Value Information Analysis
@@ -138,7 +129,6 @@
 ; GCN-O0-NEXT: SI Memory Legalizer
 ; GCN-O0-NEXT: MachinePostDominator Tree Construction
 ; GCN-O0-NEXT: SI insert wait instructions
-; GCN-O0-NEXT: SI Shrink Instructions
 ; GCN-O0-NEXT: Insert required mode register values
 ; GCN-O0-NEXT: MachineDominator Tree Construction
 ; GCN-O0-NEXT: SI Final Branch Preparation
@@ -225,8 +215,6 @@
 ; GCN-O1-NEXT: AMDGPU Annotate Kernel Features
 ; GCN-O1-NEXT: FunctionPass Manager
 ; GCN-O1-NEXT: AMDGPU Lower Kernel Arguments
-; GCN-O1-NEXT: Analysis if a function is memory bound
-; GCN-O1-NEXT: FunctionPass Manager
 ; GCN-O1-NEXT: Dominator Tree Construction
 ; GCN-O1-NEXT: Natural Loop Information
 ; GCN-O1-NEXT: CodeGen Prepare
@@ -495,8 +483,6 @@
 ; GCN-O1-OPTS-NEXT: AMDGPU Annotate Kernel Features
 ; GCN-O1-OPTS-NEXT: FunctionPass Manager
 ; GCN-O1-OPTS-NEXT: AMDGPU Lower Kernel Arguments
-; GCN-O1-OPTS-NEXT: Analysis if a function is memory bound
-; GCN-O1-OPTS-NEXT: FunctionPass Manager
 ; GCN-O1-OPTS-NEXT: Dominator Tree Construction
 ; GCN-O1-OPTS-NEXT: Natural Loop Information
 ; GCN-O1-OPTS-NEXT: CodeGen Prepare
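For readers of the GCN-O0/GCN-O1/GCN-O1-OPTS pipeline checks above and the C++ guards earlier in the diff: LLVM's CodeGenOpt::Level values order as None < Less < Default < Aggressive, corresponding to -O0, -O1, -O2 and -O3. The small standalone C++ sketch below (the enum is redeclared here purely for illustration) spells out which guards admit which levels; it is not code from the patch.

#include <cassert>

// Redeclared for illustration; mirrors the ordering of llvm::CodeGenOpt::Level.
enum class Level { None = 0, Less = 1, Default = 2, Aggressive = 3 };

int main() {
  // "getOptLevel() > CodeGenOpt::None" admits -O1 and above, which is why
  // Flatten the CFG, Code sinking and SI Shrink Instructions drop out of the
  // GCN-O0 listing above but stay at -O1 and above.
  assert(Level::Less > Level::None);      // -O1 passes an "> None" guard
  assert(!(Level::None > Level::None));   // -O0 does not
  // "TM->getOptLevel() > CodeGenOpt::Less" admits only -O2 and above, which is
  // why the memory-bound analysis disappears from all three listings here.
  assert(!(Level::Less > Level::Less));   // -O1 does not pass an "> Less" guard
  assert(Level::Default > Level::Less);   // -O2 does
  return 0;
}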
@@ -67,7 +67,7 @@ define amdgpu_kernel void @update_dpp64_test(i64 addrspace(1)* %arg, i64 %in1, i
 ; GCN-OPT-DAG: v_mov_b32_e32 v[[OLD_LO:[0-9]+]], 0x3afaedd9
 ; GCN-OPT-DAG: v_mov_b32_e32 v[[OLD_HI:[0-9]+]], 0x7047
 ; GFX8-NOOPT-DAG: s_mov_b32 s[[SOLD_LO:[0-9]+]], 0x3afaedd9
-; GFX8-NOOPT-DAG: s_movk_i32 s[[SOLD_HI:[0-9]+]], 0x7047
+; GFX8-NOOPT-DAG: s_mov_b32 s[[SOLD_HI:[0-9]+]], 0x7047
 ; GCN-DAG: load_dwordx2 v{{\[}}[[SRC_LO:[0-9]+]]:[[SRC_HI:[0-9]+]]]
 ; GCN-OPT-DAG: v_mov_b32_dpp v[[OLD_LO]], v[[SRC_LO]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1{{$}}
 ; GCN-OPT-DAG: v_mov_b32_dpp v[[OLD_HI]], v[[SRC_HI]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1{{$}}
@@ -86,7 +86,7 @@ define amdgpu_kernel void @update_dpp64_imm_old_test(i64 addrspace(1)* %arg, i64
 ; GCN-OPT-DAG: v_mov_b32_e32 v[[OLD_LO:[0-9]+]], 0x3afaedd9
 ; GCN-OPT-DAG: v_mov_b32_e32 v[[OLD_HI:[0-9]+]], 0x7047
 ; GFX8-NOOPT-DAG: s_mov_b32 s[[SOLD_LO:[0-9]+]], 0x3afaedd9
-; GFX8-NOOPT-DAG: s_movk_i32 s[[SOLD_HI:[0-9]+]], 0x7047
+; GFX8-NOOPT-DAG: s_mov_b32 s[[SOLD_HI:[0-9]+]], 0x7047
 ; GCN-OPT-DAG: v_mov_b32_dpp v{{[0-9]+}}, v[[OLD_LO]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1{{$}}
 ; GCN-OPT-DAG: v_mov_b32_dpp v{{[0-9]+}}, v[[OLD_HI]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1{{$}}
 ; GCN-NOOPT-DAG: v_mov_b32_dpp v{{[0-9]+}}, v[[SRC_LO]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1{{$}}
@@ -14,7 +14,7 @@ define void @scalar_to_vector_i16() {
 }
 
 ; GCN-LABEL: {{^}}scalar_to_vector_f16:
-; GCN-NOOPT: s_movk_i32 [[S:s[0-9]+]], 0x3c00
+; GCN-NOOPT: s_mov_b32 [[S:s[0-9]+]], 0x3c00
 ; GCN-NOOPT: v_mov_b32_e32 [[V:v[0-9]+]], [[S]]
 ; GCN-OPT: v_mov_b32_e32 [[V:v[0-9]+]], 0x3c00
 ; GCN: buffer_store_short [[V]],
@@ -36,20 +36,20 @@ define amdgpu_gfx void @strict_wwm_no_cfg(<4 x i32> inreg %tmp14) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s8
 ; GFX9-O0-NEXT: s_nop 1
 ; GFX9-O0-NEXT: v_mov_b32_dpp v2, v0 row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-O0-NEXT: v_add_u32_e32 v0, v0, v2
+; GFX9-O0-NEXT: v_add_u32_e64 v0, v0, v2
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[10:11]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
 ; GFX9-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
 ; GFX9-O0-NEXT: v_mov_b32_e32 v0, s8
 ; GFX9-O0-NEXT: s_nop 1
 ; GFX9-O0-NEXT: v_mov_b32_dpp v0, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-O0-NEXT: v_add_u32_e32 v0, v1, v0
+; GFX9-O0-NEXT: v_add_u32_e64 v0, v1, v0
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[10:11]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
 ; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[10:11], v3, v4
 ; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[10:11]
 ; GFX9-O0-NEXT: s_mov_b32 s9, 1
-; GFX9-O0-NEXT: v_lshlrev_b32_e32 v3, s9, v3
+; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s9, v3
 ; GFX9-O0-NEXT: s_mov_b32 s9, 2
 ; GFX9-O0-NEXT: v_mov_b32_e32 v4, s9
 ; GFX9-O0-NEXT: v_and_b32_e32 v3, v3, v4
@@ -170,11 +170,11 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
 ; GFX9-O0-NEXT: s_nop 1
 ; GFX9-O0-NEXT: v_mov_b32_dpp v2, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-O0-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
 ; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], s4, v0
+; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v0, s4
 ; GFX9-O0-NEXT: v_mov_b32_e32 v0, s4
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT: s_mov_b64 s[4:5], exec
@@ -198,7 +198,7 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT: s_not_b64 exec, exec
 ; GFX9-O0-NEXT: s_or_saveexec_b64 s[4:5], -1
 ; GFX9-O0-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-O0-NEXT: v_add_u32_e32 v1, v2, v1
+; GFX9-O0-NEXT: v_add_u32_e64 v1, v2, v1
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
@@ -218,7 +218,7 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v0, v3
 ; GFX9-O0-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
 ; GFX9-O0-NEXT: s_mov_b32 s6, 1
-; GFX9-O0-NEXT: v_lshlrev_b32_e32 v0, s6, v0
+; GFX9-O0-NEXT: v_lshlrev_b32_e64 v0, s6, v0
 ; GFX9-O0-NEXT: s_mov_b32 s6, 2
 ; GFX9-O0-NEXT: v_mov_b32_e32 v3, s6
 ; GFX9-O0-NEXT: v_and_b32_e32 v0, v0, v3
@@ -321,11 +321,11 @@ define hidden i32 @strict_wwm_called(i32 %a) noinline {
 ; GFX9-O0-LABEL: strict_wwm_called:
 ; GFX9-O0: ; %bb.0:
 ; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: v_add_u32_e32 v1, v0, v0
+; GFX9-O0-NEXT: v_add_u32_e64 v1, v0, v0
 ; GFX9-O0-NEXT: ; implicit-def: $sgpr4
 ; GFX9-O0-NEXT: ; implicit-def: $sgpr4
 ; GFX9-O0-NEXT: v_mul_lo_u32 v0, v1, v0
-; GFX9-O0-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-O0-NEXT: v_sub_u32_e64 v0, v0, v1
 ; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
 ;
 ; GFX9-O3-LABEL: strict_wwm_called:
@@ -352,7 +352,7 @@ define amdgpu_gfx void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %arg)
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[10:11]
 ; GFX9-O0-NEXT: v_writelane_b32 v3, s33, 7
 ; GFX9-O0-NEXT: s_mov_b32 s33, s32
-; GFX9-O0-NEXT: s_addk_i32 s32, 0x400
+; GFX9-O0-NEXT: s_add_i32 s32, s32, 0x400
 ; GFX9-O0-NEXT: v_writelane_b32 v3, s30, 0
 ; GFX9-O0-NEXT: v_writelane_b32 v3, s31, 1
 ; GFX9-O0-NEXT: v_writelane_b32 v3, s8, 2
@@ -389,11 +389,11 @@ define amdgpu_gfx void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %arg)
 ; GFX9-O0-NEXT: v_readlane_b32 s30, v3, 0
 ; GFX9-O0-NEXT: v_readlane_b32 s31, v3, 1
 ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[10:11]
 ; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
 ; GFX9-O0-NEXT: buffer_store_dword v0, off, s[4:7], s8 offset:4
-; GFX9-O0-NEXT: s_addk_i32 s32, 0xfc00
+; GFX9-O0-NEXT: s_add_i32 s32, s32, 0xfffffc00
 ; GFX9-O0-NEXT: v_readlane_b32 s33, v3, 7
 ; GFX9-O0-NEXT: s_or_saveexec_b64 s[4:5], -1
 ; GFX9-O0-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
@@ -553,7 +553,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT: s_mov_b64 exec, s[10:11]
 ; GFX9-O0-NEXT: v_writelane_b32 v11, s33, 9
 ; GFX9-O0-NEXT: s_mov_b32 s33, s32
-; GFX9-O0-NEXT: s_addk_i32 s32, 0xc00
+; GFX9-O0-NEXT: s_add_i32 s32, s32, 0xc00
 ; GFX9-O0-NEXT: v_writelane_b32 v11, s30, 0
 ; GFX9-O0-NEXT: v_writelane_b32 v11, s31, 1
 ; GFX9-O0-NEXT: v_writelane_b32 v11, s9, 2
@@ -619,7 +619,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
 ; GFX9-O0-NEXT: s_mov_b32 s8, 0
 ; GFX9-O0-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], s8 offset:4
-; GFX9-O0-NEXT: s_addk_i32 s32, 0xf400
+; GFX9-O0-NEXT: s_add_i32 s32, s32, 0xfffff400
 ; GFX9-O0-NEXT: v_readlane_b32 s33, v11, 9
 ; GFX9-O0-NEXT: s_or_saveexec_b64 s[4:5], -1
 ; GFX9-O0-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
@@ -729,7 +729,7 @@ define amdgpu_gfx void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %in
 ; GFX9-O0-NEXT: s_mov_b32 s7, s8
 ; GFX9-O0-NEXT: ; kill: def $sgpr8_sgpr9_sgpr10_sgpr11 killed $sgpr4_sgpr5_sgpr6_sgpr7
 ; GFX9-O0-NEXT: s_mov_b32 s8, 5
-; GFX9-O0-NEXT: v_lshlrev_b32_e32 v0, s8, v0
+; GFX9-O0-NEXT: v_lshlrev_b32_e64 v0, s8, v0
 ; GFX9-O0-NEXT: s_mov_b32 s8, 0
 ; GFX9-O0-NEXT: buffer_load_dwordx4 v[10:13], v0, s[4:7], s8 offen
 ; GFX9-O0-NEXT: buffer_load_dwordx2 v[3:4], v0, s[4:7], s8 offen offset:16
@@ -738,7 +738,7 @@ define amdgpu_gfx void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %in
 ; GFX9-O0-NEXT: v_mov_b32_e32 v5, v10
 ; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
-; GFX9-O0-NEXT: s_brev_b32 s9, -2
+; GFX9-O0-NEXT: s_mov_b32 s9, 0x7fffffff
 ; GFX9-O0-NEXT: s_mov_b32 s10, -1
 ; GFX9-O0-NEXT: ; kill: def $sgpr10 killed $sgpr10 def $sgpr10_sgpr11
 ; GFX9-O0-NEXT: s_mov_b32 s11, s9
@@ -15,14 +15,16 @@ define amdgpu_cs void @no_cfg(<4 x i32> inreg %tmp14) {
 ; GFX9: s_or_saveexec_b64 s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, -1
 
 ; GFX9-DAG: v_mov_b32_dpp v[[FIRST_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-DAG: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O3-DAG: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O0-DAG: v_add_u32_e64 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
 ; GFX9-DAG: v_mov_b32_e32 v[[FIRST:[0-9]+]], v[[FIRST_ADD]]
 %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
 %tmp121 = add i32 %tmp105, %tmp120
 %tmp122 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp121)
 
 ; GFX9-DAG: v_mov_b32_dpp v[[SECOND_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-DAG: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O3-DAG: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O0-DAG: v_add_u32_e64 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
 ; GFX9-DAG: v_mov_b32_e32 v[[SECOND:[0-9]+]], v[[SECOND_ADD]]
 %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
 %tmp136 = add i32 %tmp107, %tmp135
@@ -48,7 +50,8 @@ entry:
 %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
 
 ; GFX9: v_mov_b32_dpp v[[FIRST_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O3: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O0: v_add_u32_e64 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
 ; GFX9: v_mov_b32_e32 v[[FIRST:[0-9]+]], v[[FIRST_ADD]]
 ; GFX9-O0: buffer_store_dword v[[FIRST]], off, s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, 0 offset:[[FIRST_IMM_OFFSET:[0-9]+]]
 %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
@@ -62,7 +65,8 @@ if:
 %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)
 
 ; GFX9: v_mov_b32_dpp v[[SECOND_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O3: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O0: v_add_u32_e64 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
 ; GFX9: v_mov_b32_e32 v[[SECOND:[0-9]+]], v[[SECOND_ADD]]
 ; GFX9-O0: buffer_store_dword v[[SECOND]], off, s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, 0 offset:[[SECOND_IMM_OFFSET:[0-9]+]]
 %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
@@ -87,11 +91,13 @@ merge:
 
 ; GFX9-LABEL: {{^}}called:
 define hidden i32 @called(i32 %a) noinline {
-; GFX9: v_add_u32_e32 v1, v0, v0
+; GFX9-O3: v_add_u32_e32 v1, v0, v0
+; GFX9-O0: v_add_u32_e64 v1, v0, v0
 %add = add i32 %a, %a
 ; GFX9: v_mul_lo_u32 v0, v1, v0
 %mul = mul i32 %add, %a
-; GFX9: v_sub_u32_e32 v0, v0, v1
+; GFX9-O3: v_sub_u32_e32 v0, v0, v1
+; GFX9-O0: v_sub_u32_e64 v0, v0, v1
 %sub = sub i32 %mul, %add
 ret i32 %sub
 }
@@ -114,7 +120,8 @@ define amdgpu_kernel void @call(<4 x i32> inreg %tmp14, i32 inreg %arg) {
 ; GFX9: s_swappc_b64
 %tmp134 = call i32 @called(i32 %tmp107)
 ; GFX9: v_mov_b32_e32 v1, v0
-; GFX9: v_add_u32_e32 v1, v1, v2
+; GFX9-O3: v_add_u32_e32 v1, v1, v2
+; GFX9-O0: v_add_u32_e64 v1, v1, v2
 %tmp136 = add i32 %tmp134, %tmp107
 %tmp137 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp136)
 ; GFX9: buffer_store_dword v0
@@ -202,14 +209,16 @@ define amdgpu_cs void @strict_wwm_no_cfg(<4 x i32> inreg %tmp14) {
 ; GFX9: s_or_saveexec_b64 s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, -1
 
 ; GFX9-DAG: v_mov_b32_dpp v[[FIRST_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-DAG: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O3-DAG: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O0-DAG: v_add_u32_e64 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
 ; GFX9-DAG: v_mov_b32_e32 v[[FIRST:[0-9]+]], v[[FIRST_ADD]]
 %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
 %tmp121 = add i32 %tmp105, %tmp120
 %tmp122 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp121)
 
 ; GFX9-DAG: v_mov_b32_dpp v[[SECOND_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9-DAG: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O3-DAG: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O0-DAG: v_add_u32_e64 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
 ; GFX9-DAG: v_mov_b32_e32 v[[SECOND:[0-9]+]], v[[SECOND_ADD]]
 %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
 %tmp136 = add i32 %tmp107, %tmp135
@@ -235,7 +244,8 @@ entry:
 %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
 
 ; GFX9: v_mov_b32_dpp v[[FIRST_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O3: v_add_u32_e32 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
+; GFX9-O0: v_add_u32_e64 v[[FIRST_ADD:[0-9]+]], v{{[0-9]+}}, v[[FIRST_MOV]]
 ; GFX9: v_mov_b32_e32 v[[FIRST:[0-9]+]], v[[FIRST_ADD]]
 ; GFX9-O0: buffer_store_dword v[[FIRST]], off, s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, 0 offset:[[FIRST_IMM_OFFSET:[0-9]+]]
 %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
@@ -249,7 +259,8 @@ if:
 %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)
 
 ; GFX9: v_mov_b32_dpp v[[SECOND_MOV:[0-9]+]], v{{[0-9]+}} row_bcast:31 row_mask:0xc bank_mask:0xf
-; GFX9: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O3: v_add_u32_e32 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
+; GFX9-O0: v_add_u32_e64 v[[SECOND_ADD:[0-9]+]], v{{[0-9]+}}, v[[SECOND_MOV]]
 ; GFX9: v_mov_b32_e32 v[[SECOND:[0-9]+]], v[[SECOND_ADD]]
 ; GFX9-O0: buffer_store_dword v[[SECOND]], off, s{{\[}}{{[0-9]+}}:{{[0-9]+}}{{\]}}, 0 offset:[[SECOND_IMM_OFFSET:[0-9]+]]
 %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
@@ -274,11 +285,13 @@ merge:
 
 ; GFX9-LABEL: {{^}}strict_wwm_called:
 define hidden i32 @strict_wwm_called(i32 %a) noinline {
-; GFX9: v_add_u32_e32 v1, v0, v0
+; GFX9-O3: v_add_u32_e32 v1, v0, v0
+; GFX9-O0: v_add_u32_e64 v1, v0, v0
 %add = add i32 %a, %a
 ; GFX9: v_mul_lo_u32 v0, v1, v0
 %mul = mul i32 %add, %a
-; GFX9: v_sub_u32_e32 v0, v0, v1
+; GFX9-O3: v_sub_u32_e32 v0, v0, v1
+; GFX9-O0: v_sub_u32_e64 v0, v0, v1
 %sub = sub i32 %mul, %add
 ret i32 %sub
 }
@@ -301,7 +314,8 @@ define amdgpu_kernel void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %ar
 ; GFX9: s_swappc_b64
 %tmp134 = call i32 @strict_wwm_called(i32 %tmp107)
 ; GFX9: v_mov_b32_e32 v1, v0
-; GFX9: v_add_u32_e32 v1, v1, v2
+; GFX9-O3: v_add_u32_e32 v1, v1, v2
+; GFX9-O0: v_add_u32_e64 v1, v1, v2
 %tmp136 = add i32 %tmp134, %tmp107
 %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
 ; GFX9: buffer_store_dword v0