llvm-mirror/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
commit d49cb60862 by Guillaume Chatelet: [Alignment] Use llvm::Align in MachineFunction and TargetLowering - fixes mir parsing
Summary:
This catches malformed MIR files that specify alignment as a log2 value instead of a power of two (see the sketch below).
See https://reviews.llvm.org/D65945 for reference.
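
A minimal sketch of the difference (the "alignment: 5" input below is a hypothetical malformed file, not taken from the test suite):

    # Rejected after this patch: 5 was meant as log2(32), but the field must
    # hold the power-of-two byte alignment itself, and 5 is not a power of two.
    alignment: 5

    # Well-formed, as used throughout the test below:
    alignment: 16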

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: MatzeB, qcolombet, dschuff, arsenm, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, s.egerton, pzheng, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67433

llvm-svn: 371608
2019-09-11 11:16:48 +00:00

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
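# Each function enters instruction selection as legalized, register-bank-selected
# generic MIR; the ALL checks verify that a 512-bit vector G_SUB is selected to
# the corresponding VPSUB*Zrr instruction.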
--- |
  define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
    %ret = sub <64 x i8> %arg1, %arg2
    ret <64 x i8> %ret
  }
  define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 {
    %ret = sub <32 x i16> %arg1, %arg2
    ret <32 x i16> %ret
  }
  define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 {
    %ret = sub <16 x i32> %arg1, %arg2
    ret <16 x i32> %ret
  }
  define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #1 {
    %ret = sub <8 x i64> %arg1, %arg2
    ret <8 x i64> %ret
  }
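  ; Attribute group #0 requires AVX512BW, since 512-bit byte/word subtracts
  ; select to VPSUBBZrr/VPSUBWZrr; group #1 needs only AVX512F for the
  ; dword/qword forms VPSUBDZrr/VPSUBQZrr.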
  attributes #0 = { "target-features"="+avx512f,+avx512bw" }
  attributes #1 = { "target-features"="+avx512f" }
...
---
name:            test_sub_v64i8
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_sub_v64i8
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; ALL: [[VPSUBBZrr:%[0-9]+]]:vr512 = VPSUBBZrr [[COPY]], [[COPY1]]
    ; ALL: $zmm0 = COPY [[VPSUBBZrr]]
    ; ALL: RET 0, implicit $zmm0
    %0(<64 x s8>) = COPY $zmm0
    %1(<64 x s8>) = COPY $zmm1
    %2(<64 x s8>) = G_SUB %0, %1
    $zmm0 = COPY %2(<64 x s8>)
    RET 0, implicit $zmm0
...
---
name:            test_sub_v32i16
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_sub_v32i16
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; ALL: [[VPSUBWZrr:%[0-9]+]]:vr512 = VPSUBWZrr [[COPY]], [[COPY1]]
    ; ALL: $zmm0 = COPY [[VPSUBWZrr]]
    ; ALL: RET 0, implicit $zmm0
    %0(<32 x s16>) = COPY $zmm0
    %1(<32 x s16>) = COPY $zmm1
    %2(<32 x s16>) = G_SUB %0, %1
    $zmm0 = COPY %2(<32 x s16>)
    RET 0, implicit $zmm0
...
---
name:            test_sub_v16i32
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_sub_v16i32
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; ALL: [[VPSUBDZrr:%[0-9]+]]:vr512 = VPSUBDZrr [[COPY]], [[COPY1]]
    ; ALL: $zmm0 = COPY [[VPSUBDZrr]]
    ; ALL: RET 0, implicit $zmm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<16 x s32>) = COPY $zmm1
    %2(<16 x s32>) = G_SUB %0, %1
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $zmm0
...
---
name:            test_sub_v8i64
alignment:       16
legalized:       true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_sub_v8i64
    ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; ALL: [[VPSUBQZrr:%[0-9]+]]:vr512 = VPSUBQZrr [[COPY]], [[COPY1]]
    ; ALL: $zmm0 = COPY [[VPSUBQZrr]]
    ; ALL: RET 0, implicit $zmm0
    %0(<8 x s64>) = COPY $zmm0
    %1(<8 x s64>) = COPY $zmm1
    %2(<8 x s64>) = G_SUB %0, %1
    $zmm0 = COPY %2(<8 x s64>)
    RET 0, implicit $zmm0
...