llvm-mirror/test/CodeGen/RISCV/rvv/vmxor-rv32.ll
Hsiangkai Wang c0d659ef28 [RISCV] Use v8-v23 as argument registers to conform to the proposal.
The maximum LMUL is 8, so two LMUL-8 arguments need 16 vector registers. The change follows the psABI proposal in
https://github.com/riscv/riscv-elf-psabi-doc/pull/171

Differential Revision: https://reviews.llvm.org/D95134
2021-01-22 07:55:24 +08:00
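
As a minimal LLVM IR sketch of the register-count argument (illustrative only; the function name is hypothetical and this code is not part of the test file below): a function taking two LMUL-8 arguments, e.g. <vscale x 64 x i8> at SEW=8, would under the proposed convention receive its first argument in v8-v15 and its second in v16-v23, exactly filling the v8-v23 range.

; Illustrative sketch, not from the test file: two LMUL-8 arguments.
; Under the proposed ABI, %x would be passed in v8-v15 and %y in v16-v23.
define <vscale x 64 x i8> @two_lmul8_args(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y) nounwind {
entry:
  ; A plain vector xor keeps the sketch self-contained; any use of both
  ; arguments exercises the calling convention.
  %r = xor <vscale x 64 x i8> %x, %y
  ret <vscale x 64 x i8> %r
}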

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
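
; The seven tests below exercise vmxor.mm across every SEW=8 mask type,
; from nxv1i1 (LMUL=1/8, e8,mf8) up to nxv64i1 (LMUL=8, e8,m8); only the
; LMUL field of the generated vsetvli differs between them.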

declare <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmxor_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    i32 %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmxor_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    i32 %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmxor_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    i32 %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmxor_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    i32 %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmxor_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    i32 %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmxor_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    i32 %2)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i1> @intrinsic_vmxor_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    i32 %2)
  ret <vscale x 64 x i1> %a
}