Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-26 04:32:44 +01:00
d49cb60862
Summary:
This catches malformed MIR files which specify alignment as log2 instead of pow2. See https://reviews.llvm.org/D65945 for reference.

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: MatzeB, qcolombet, dschuff, arsenm, sdardis, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, s.egerton, pzheng, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67433

llvm-svn: 371608
112 lines, 3.2 KiB, YAML
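For context on the alignment convention the commit above enforces: the alignment: key on each machine function in the test below gives the function alignment in bytes, written as the power-of-two value itself (alignment: 16), not as its log2 exponent. A minimal sketch of the two spellings for a 16-byte-aligned function, based only on what the commit summary states:

# pow2 spelling, alignment in bytes, as used throughout this test
alignment: 16
# the log2 spelling the summary calls malformed (log2(16) == 4); read as a
# byte value it would instead mean 4-byte alignment
# alignment: 4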
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL
--- |
  define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 {
    %ret = mul <8 x i16> %arg1, %arg2
    ret <8 x i16> %ret
  }

  define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #0 {
    %ret = mul <4 x i32> %arg1, %arg2
    ret <4 x i32> %ret
  }

  define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #1 {
    %ret = mul <2 x i64> %arg1, %arg2
    ret <2 x i64> %ret
  }

  attributes #0 = { "target-features"="+sse4.1" }
  attributes #1 = { "target-features"="+sse4.1,+avx512vl,+avx512f,+avx512dq" }

...
---
name: test_mul_v8i16
# ALL-LABEL: name: test_mul_v8i16
alignment: 16
legalized: false
regBankSelected: false
# ALL: registers:
# ALL-NEXT: - { id: 0, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: _, preferred-register: '' }
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
# ALL: %0:_(<8 x s16>) = COPY $xmm0
# ALL-NEXT: %1:_(<8 x s16>) = COPY $xmm1
# ALL-NEXT: %2:_(<8 x s16>) = G_MUL %0, %1
# ALL-NEXT: $xmm0 = COPY %2(<8 x s16>)
# ALL-NEXT: RET 0, implicit $xmm0
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    %0(<8 x s16>) = COPY $xmm0
    %1(<8 x s16>) = COPY $xmm1
    %2(<8 x s16>) = G_MUL %0, %1
    $xmm0 = COPY %2(<8 x s16>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v4i32
# ALL-LABEL: name: test_mul_v4i32
alignment: 16
legalized: false
regBankSelected: false
# ALL: registers:
# ALL-NEXT: - { id: 0, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: _, preferred-register: '' }
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
# ALL: %0:_(<4 x s32>) = COPY $xmm0
# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
# ALL-NEXT: %2:_(<4 x s32>) = G_MUL %0, %1
# ALL-NEXT: $xmm0 = COPY %2(<4 x s32>)
# ALL-NEXT: RET 0, implicit $xmm0
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    %0(<4 x s32>) = COPY $xmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<4 x s32>) = G_MUL %0, %1
    $xmm0 = COPY %2(<4 x s32>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v2i64
# ALL-LABEL: name: test_mul_v2i64
alignment: 16
legalized: false
regBankSelected: false
# ALL: registers:
# ALL-NEXT: - { id: 0, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 1, class: _, preferred-register: '' }
# ALL-NEXT: - { id: 2, class: _, preferred-register: '' }
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
# ALL: %0:_(<2 x s64>) = COPY $xmm0
# ALL-NEXT: %1:_(<2 x s64>) = COPY $xmm1
# ALL-NEXT: %2:_(<2 x s64>) = G_MUL %0, %1
# ALL-NEXT: $xmm0 = COPY %2(<2 x s64>)
# ALL-NEXT: RET 0, implicit $xmm0
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    %0(<2 x s64>) = COPY $xmm0
    %1(<2 x s64>) = COPY $xmm1
    %2(<2 x s64>) = G_MUL %0, %1
    $xmm0 = COPY %2(<2 x s64>)
    RET 0, implicit $xmm0

...
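For reference, the RUN line at the top drives the whole test: llc runs only the GlobalISel legalizer pass over the MIR and prints the result, which FileCheck then matches against the # ALL / # ALL-NEXT lines. A hedged replay of that pipeline by hand, assuming llc and FileCheck from an LLVM build are on PATH and the test is saved locally as legalize-mul.mir (the file name stands in for lit's %s substitution; the in-tree path is not shown on this page):

llc -mtriple=x86_64-linux-gnu -run-pass=legalizer legalize-mul.mir -o - \
  | FileCheck legalize-mul.mir --check-prefix=ALL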