; Mirrored from https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 04:32:44 +01:00, commit 95cb757dac)
;
; This patch adds support for S_ANDN2, S_ORN2 32-bit and 64-bit instructions
; and adds splits to move them to the vector unit (for which there is no
; equivalent instruction). It modifies the way that the more complex scalar
; instructions are lowered to vector instructions by first breaking them down
; to sequences of simpler scalar instructions which are then lowered through
; the existing code paths. The pattern for S_XNOR has also been updated to
; apply inversion to one input rather than the output of the XOR, as the
; result is equivalent and may allow leaving the NOT instruction on the
; scalar unit. New tests for NAND, NOR, ANDN2 and ORN2 have been added, and
; existing tests now hit the new instructions (and have been modified
; accordingly).
; Differential: https://reviews.llvm.org/D54714
; llvm-svn: 347877
; RUN: llc -march=amdgcn -mcpu=gfx600 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX600 %s
; RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX700 %s
; RUN: llc -march=amdgcn -mcpu=gfx801 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX801 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN --check-prefix=GFX900 %s
; A 32-bit NAND (and + xor -1) whose AND has a single use: the uniform
; (scalar) path should fold the pair into one s_nand_b32.
; GCN-LABEL: {{^}}scalar_nand_i32_one_use
; GCN: s_nand_b32
define amdgpu_kernel void @scalar_nand_i32_one_use(
    i32 addrspace(1)* %r0, i32 %a, i32 %b) {
entry:
  %and = and i32 %a, %b
  %r0.val = xor i32 %and, -1          ; NOT of the AND result => NAND
  store i32 %r0.val, i32 addrspace(1)* %r0
  ret void
}
; The intermediate AND has a second use (%r1.val), so the NAND fold must not
; fire: expect a separate s_and_b32 + s_not_b32 instead of s_nand_b32.
; GCN-LABEL: {{^}}scalar_nand_i32_mul_use
; GCN-NOT: s_nand_b32
; GCN: s_and_b32
; GCN: s_not_b32
; GCN: s_add_i32
define amdgpu_kernel void @scalar_nand_i32_mul_use(
    i32 addrspace(1)* %r0, i32 addrspace(1)* %r1, i32 %a, i32 %b) {
entry:
  %and = and i32 %a, %b
  %r0.val = xor i32 %and, -1          ; first use: NOT of the AND
  %r1.val = add i32 %and, %a          ; second use keeps %and live
  store i32 %r0.val, i32 addrspace(1)* %r0
  store i32 %r1.val, i32 addrspace(1)* %r1
  ret void
}
; 64-bit variant of the single-use case: uniform operands should select the
; 64-bit scalar form s_nand_b64.
; GCN-LABEL: {{^}}scalar_nand_i64_one_use
; GCN: s_nand_b64
define amdgpu_kernel void @scalar_nand_i64_one_use(
    i64 addrspace(1)* %r0, i64 %a, i64 %b) {
entry:
  %and = and i64 %a, %b
  %r0.val = xor i64 %and, -1          ; NOT of the AND result => NAND
  store i64 %r0.val, i64 addrspace(1)* %r0
  ret void
}
; 64-bit multi-use case: the extra use of %and blocks the NAND fold, so the
; AND and NOT stay separate; the 64-bit add lowers to s_add_u32/s_addc_u32.
; GCN-LABEL: {{^}}scalar_nand_i64_mul_use
; GCN-NOT: s_nand_b64
; GCN: s_and_b64
; GCN: s_not_b64
; GCN: s_add_u32
; GCN: s_addc_u32
define amdgpu_kernel void @scalar_nand_i64_mul_use(
    i64 addrspace(1)* %r0, i64 addrspace(1)* %r1, i64 %a, i64 %b) {
entry:
  %and = and i64 %a, %b
  %r0.val = xor i64 %and, -1          ; first use: NOT of the AND
  %r1.val = add i64 %and, %a          ; second use keeps %and live
  store i64 %r0.val, i64 addrspace(1)* %r0
  store i64 %r1.val, i64 addrspace(1)* %r1
  ret void
}
; Non-kernel function: arguments arrive in VGPRs (divergent), so there is no
; scalar NAND available — expect the split v_and_b32 + v_not_b32 sequence.
; GCN-LABEL: {{^}}vector_nand_i32_one_use
; GCN-NOT: s_nand_b32
; GCN: v_and_b32
; GCN: v_not_b32
define i32 @vector_nand_i32_one_use(i32 %a, i32 %b) {
entry:
  %and = and i32 %a, %b
  %r = xor i32 %and, -1               ; NOT of the AND result => NAND
  ret i32 %r
}
; Divergent 64-bit NAND: no 64-bit VALU logical ops exist, so the operation
; is split into two 32-bit halves — two v_and_b32 followed by two v_not_b32.
; GCN-LABEL: {{^}}vector_nand_i64_one_use
; GCN-NOT: s_nand_b64
; GCN: v_and_b32
; GCN: v_and_b32
; GCN: v_not_b32
; GCN: v_not_b32
define i64 @vector_nand_i64_one_use(i64 %a, i64 %b) {
entry:
  %and = and i64 %a, %b
  %r = xor i64 %and, -1               ; NOT of the AND result => NAND
  ret i64 %r
}