16a6bb6581
Adds legalizer, register bank select, and instruction select support for G_SBFX and G_UBFX. These opcodes generate scalar or vector ALU bitfield extract instructions for AMDGPU. The instructions allow both constant and register values for the offset and width operands.

The 32-bit scalar version is expanded to a sequence that combines the offset and width into a single register. There are no 64-bit vgpr bitfield extract instructions, so the 64-bit operations are expanded to a sequence of instructions that implement them. If the width is a constant, then the 32-bit bitfield extract instructions are used.

Moved the AArch64-specific code for creating G_SBFX to CombinerHelper.cpp so that it can be used by other targets. Only bitfield extracts with constant offset and width values are handled currently.

Differential Revision: https://reviews.llvm.org/D100149
152 lines
4.6 KiB
YAML
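As a reference for the semantics being tested, here is a minimal C++ sketch of what a 32-bit signed and unsigned bitfield extract compute. This is an illustration only, not the LLVM implementation; the helper names sbfx32 and ubfx32 are hypothetical.

    #include <cstdint>

    // G_SBFX-style extract: read `width` bits of `src` starting at bit
    // `offset`, then sign-extend the field to 32 bits.
    // Assumes 0 < width and offset + width <= 32, and an arithmetic right
    // shift for signed types (guaranteed since C++20, and the behavior of
    // all mainstream compilers before that).
    int32_t sbfx32(int32_t src, unsigned offset, unsigned width) {
      // Shift the field to the top bits (as unsigned, to avoid UB when
      // left-shifting a negative value), then arithmetic-shift back down
      // so the field's top bit fills the high bits.
      uint32_t hi = (uint32_t)src << (32 - offset - width);
      return (int32_t)hi >> (32 - width);
    }

    // G_UBFX-style extract: the same field, zero-extended.
    uint32_t ubfx32(uint32_t src, unsigned offset, unsigned width) {
      uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1u);
      return (src >> offset) & mask;
    }

The tests below exercise the combine that forms these operations: a G_SEXT_INREG of a right shift by a constant is rewritten to G_SBFX when the extracted field fits within the register.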
152 lines
4.6 KiB
YAML
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s
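# A right shift by 4 followed by sign-extension from the low 16 bits reads a
# signed 16-bit field at bit offset 4, so the combiner folds the pair into
# G_SBFX with offset 4 and width 16.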
---
name: bfe_sext_inreg_ashr_s32
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0

    ; GCN-LABEL: name: bfe_sext_inreg_ashr_s32
    ; GCN: liveins: $vgpr0
    ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C1]]
    ; GCN: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 4
    %2:_(s32) = G_ASHR %0, %1(s32)
    %3:_(s32) = COPY %2(s32)
    %4:_(s32) = G_SEXT_INREG %3, 16
    $vgpr0 = COPY %4(s32)
...
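# The inner shift may be logical rather than arithmetic: the G_SEXT_INREG
# supplies the sign extension, so G_LSHR folds to the same G_SBFX.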
---
name: bfe_sext_inreg_lshr_s32
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0

    ; GCN-LABEL: name: bfe_sext_inreg_lshr_s32
    ; GCN: liveins: $vgpr0
    ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C1]]
    ; GCN: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 4
    %2:_(s32) = G_LSHR %0, %1(s32)
    %3:_(s32) = COPY %2(s32)
    %4:_(s32) = G_SEXT_INREG %3, 16
    $vgpr0 = COPY %4(s32)
...
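# The same fold applies at 64 bits, producing an s64 G_SBFX; the offset and
# width operands remain s32 constants.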
---
name: bfe_sext_inreg_ashr_s64
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1

    ; GCN-LABEL: name: bfe_sext_inreg_ashr_s64
    ; GCN: liveins: $vgpr0_vgpr1
    ; GCN: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GCN: [[SBFX:%[0-9]+]]:_(s64) = G_SBFX [[COPY]], [[C]](s32), [[C1]]
    ; GCN: $vgpr0_vgpr1 = COPY [[SBFX]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 4
    %2:_(s64) = G_ASHR %0, %1(s32)
    %3:_(s64) = COPY %2(s64)
    %4:_(s64) = G_SEXT_INREG %3, 16
    $vgpr0_vgpr1 = COPY %4(s64)
...
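# Negative test: offset 16 plus width 20 overruns the 32-bit register, so the
# pattern is not a valid bitfield extract and no G_SBFX is formed.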
---
name: toobig_sext_inreg_ashr_s32
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0

    ; GCN-LABEL: name: toobig_sext_inreg_ashr_s32
    ; GCN: liveins: $vgpr0
    ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
    ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ASHR]], 20
    ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 16
    %2:_(s32) = G_ASHR %0, %1(s32)
    %3:_(s32) = COPY %2(s32)
    %4:_(s32) = G_SEXT_INREG %3, 20
    $vgpr0 = COPY %4(s32)
...
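# Negative test at 64 bits: offset 40 plus width 32 overruns the register, so
# no G_SBFX is formed; the checks reflect the shift's expansion into 32-bit
# operations instead.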
---
name: toobig_sext_inreg_ashr_s64
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0_vgpr1

    ; GCN-LABEL: name: toobig_sext_inreg_ashr_s64
    ; GCN: liveins: $vgpr0_vgpr1
    ; GCN: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
    ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; GCN: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C1]](s32)
    ; GCN: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR1]](s32), [[ASHR]](s32)
    ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV]], 32
    ; GCN: $vgpr0_vgpr1 = COPY [[SEXT_INREG]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 40
    %2:_(s64) = G_ASHR %0, %1(s32)
    %3:_(s64) = COPY %2(s64)
    %4:_(s64) = G_SEXT_INREG %3, 32
    $vgpr0_vgpr1 = COPY %4(s64)
...
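# Negative test: the shift amount is a register value, and only constant
# offsets and widths are combined for now, so the shift and G_SEXT_INREG
# remain separate.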
---
name: var_sext_inreg_ashr_s32
legalized: true
tracksRegLiveness: true
body: |
  bb.0.entry:
    liveins: $vgpr0, $vgpr1

    ; GCN-LABEL: name: var_sext_inreg_ashr_s32
    ; GCN: liveins: $vgpr0, $vgpr1
    ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32)
    ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ASHR]], 10
    ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = G_ASHR %0, %1(s32)
    %3:_(s32) = COPY %2(s32)
    %4:_(s32) = G_SEXT_INREG %3, 10
    $vgpr0 = COPY %4(s32)
...