# RUN: llc -run-pass machine-cse -verify-machineinstrs -mtriple aarch64-apple-ios %s -o - | FileCheck %s
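# This file feeds hand-written functions from several points of the GlobalISel
# pipeline (pre-legalization, legalized, register-bank-selected, and mixes of
# generic and target instructions) through the machine-cse pass in isolation.
# In each function the CHECK lines expect the redundant computation to be
# commoned away.

# irtranslated: the two identical generic G_ADDs of the constant should be
# CSE'd into a single G_ADD.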
---
name: irtranslated
legalized: false
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: irtranslated
  ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT: %[[TWO:[0-9]+]]:_(s32) = G_ADD %[[ONE]], %[[ONE]]
  ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
  bb.0:
    %0:_(s32) = G_CONSTANT i32 1
    %1:_(s32) = G_ADD %0, %0
    %2:_(s32) = G_ADD %0, %0
    %3:_(s32) = G_ADD %1, %2
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
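# regbankselected: same CSE of the duplicated G_ADD, with every vreg already
# assigned to the gpr register bank.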
---
name: regbankselected
legalized: true
regBankSelected: true
selected: false
body: |
  ; CHECK-LABEL: name: regbankselected
  ; CHECK: %[[ONE:[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
  ; CHECK-NEXT: %[[SUM:[0-9]+]]:gpr(s32) = G_ADD %[[TWO]], %[[TWO]]
  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
  bb.0:
    %0:gpr(s32) = G_CONSTANT i32 1
    %1:gpr(s32) = G_ADD %0, %0
    %2:gpr(s32) = G_ADD %0, %0
    %3:gpr(s32) = G_ADD %1, %2
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
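# legalized: the duplicated G_ADDs carry different constraints (unconstrained
# generic vs. gpr); the CHECK lines expect a single G_ADD that keeps the
# stricter gpr constraint.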
---
name: legalized
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: legalized
  ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
  ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
  bb.0:
    %0:_(s32) = G_CONSTANT i32 1
    %1:_(s32) = G_ADD %0, %0
    %2:gpr(s32) = G_ADD %0, %0
    %3:_(s32) = G_ADD %1, %2
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
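# legalized_sym: the symmetric case of the test above, with the gpr constraint
# on the first of the two duplicated G_ADDs.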
---
name: legalized_sym
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: legalized_sym
  ; CHECK: %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
  ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
  bb.0:
    %0:_(s32) = G_CONSTANT i32 1
    %1:gpr(s32) = G_ADD %0, %0
    %2:_(s32) = G_ADD %0, %0
    %3:_(s32) = G_ADD %1, %2
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
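# int_extensions: the two G_SEXTs of the same source produce different types
# (s16 vs. s32), so neither they nor the G_ZEXTs feeding the final add may be
# merged; the CHECK lines expect all of them to survive.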
---
name: int_extensions
alignment: 4
legalized: false
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: int_extensions
  ; CHECK: %[[ONE:[0-9]+]]:_(s8) = G_CONSTANT i8 1
  ; CHECK-NEXT: %[[S16:[0-9]+]]:_(s16) = G_SEXT %[[ONE]](s8)
  ; CHECK-NEXT: %[[S32:[0-9]+]]:_(s32) = G_SEXT %[[ONE]](s8)
  ; CHECK-NEXT: %[[S16_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S16]](s16)
  ; CHECK-NEXT: %[[S32_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S32]](s32)
  ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s64) = G_ADD %[[S16_Z64]], %[[S32_Z64]]
  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s64)
  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
  bb.0.entry:
    %0:_(s8) = G_CONSTANT i8 1
    %1:_(s16) = G_SEXT %0(s8)
    %2:_(s32) = G_SEXT %0(s8)
    %3:_(s64) = G_ZEXT %1(s16)
    %4:_(s64) = G_ZEXT %2(s32)
    %5:_(s64) = G_ADD %3, %4
    $x0 = COPY %5(s64)
    RET_ReallyLR implicit $x0
...
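# generic: the COPY between two generic vregs should be folded away so that the
# second G_ADD reads the first G_ADD's result directly.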
---
name: generic
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: generic
  ; CHECK: %[[SG:[0-9]+]]:_(s32) = G_ADD %{{[0-9]+}}, %{{[0-9]+}}
  ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[SG]], %[[SG]]
  bb.0:
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_ADD %0, %1
    %3:_(s32) = COPY %2(s32)
    %4:_(s32) = G_ADD %3, %3
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0
...
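# generic_to_concrete_copy: folding a COPY from a generic vreg into a gpr32
# vreg; the G_ADD result is expected to be constrained to gpr32 and feed the
# target ADDWrr directly.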
---
name: generic_to_concrete_copy
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: generic_to_concrete_copy
  ; CHECK: %[[S1:[0-9]+]]:gpr32(s32) = G_ADD %{{[0-9]+}}, %{{[0-9]+}}
  ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[S1]](s32), %[[S1]](s32)
  bb.0:
    %0:_(s32) = COPY $w0
    %1:_(s32) = COPY $w1
    %2:_(s32) = G_ADD %0, %1
    %3:gpr32 = COPY %2(s32)
    %4:gpr32 = ADDWrr %3, %3
    $w0 = COPY %4
    RET_ReallyLR implicit $w0
...
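# concrete_to_generic_copy: the reverse direction; the ADDWrr result is
# expected to be used by the generic G_ADD directly, picking up the (s32) type.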
---
name: concrete_to_generic_copy
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: concrete_to_generic_copy
  ; CHECK: %[[S1:[0-9]+]]:gpr32(s32) = ADDWrr %{{[0-9]+}}, %{{[0-9]+}}
  ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[S1]], %[[S1]]
  bb.0:
    %0:gpr32 = COPY $w0
    %1:gpr32 = COPY $w1
    %2:gpr32 = ADDWrr %0, %1
    %3:_(s32) = COPY %2
    %4:_(s32) = G_ADD %3, %3
    $w0 = COPY %4(s32)
    RET_ReallyLR implicit $w0
...
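# concrete: the same copy folding on target instructions only, with no generic
# instructions involved.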
---
name: concrete
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: concrete
  ; CHECK: %[[SC:[0-9]+]]:gpr32 = ADDWrr %{{[0-9]+}}, %{{[0-9]+}}
  ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[SC]], %[[SC]]
  bb.0:
    %0:gpr32 = COPY $w0
    %1:gpr32 = COPY $w1
    %2:gpr32 = ADDWrr %0, %1
    %3:gpr32 = COPY %2
    %4:gpr32 = ADDWrr %3, %3
    $w0 = COPY %4
    RET_ReallyLR implicit $w0
...
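# variadic_defs_unmerge_vector: four identical G_UNMERGE_VALUES of the same
# <4 x s16> vector should be CSE'd into a single one, with the users rewired to
# its corresponding defs.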
---
name: variadic_defs_unmerge_vector
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: variadic_defs_unmerge_vector
  ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
  ; CHECK-NEXT: [[UV0:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
  ; CHECK-NEXT: [[ANYEXT0:%[0-9]+]]:_(s32) = G_ANYEXT [[UV0]](s16)
  ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK-NEXT: [[ADD0:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT0]], [[ANYEXT1]]
  ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT2]], [[ANYEXT3]]
  ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
  ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $w0
  bb.0:
    %0 :_(<4 x s16>) = COPY $d0
    %1 :_(s16), %2 :_(s16), %3 :_(s16), %4 :_(s16) = G_UNMERGE_VALUES %0(<4 x s16>)
    %5 :_(s16), %6 :_(s16), %7 :_(s16), %8 :_(s16) = G_UNMERGE_VALUES %0(<4 x s16>)
    %9 :_(s16), %10:_(s16), %11:_(s16), %12:_(s16) = G_UNMERGE_VALUES %0(<4 x s16>)
    %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16) = G_UNMERGE_VALUES %0(<4 x s16>)
    %17:_(s32) = G_ANYEXT %1 (s16)
    %18:_(s32) = G_ANYEXT %6 (s16)
    %19:_(s32) = G_ANYEXT %11(s16)
    %20:_(s32) = G_ANYEXT %16(s16)
    %21:_(s32) = G_ADD %17, %18
    %22:_(s32) = G_ADD %19, %20
    %23:_(s32) = G_ADD %21, %22
    $w0 = COPY %23(s32)
    RET_ReallyLR implicit $w0
...
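# variadic_defs_unmerge_scalar: the same CSE of identical G_UNMERGE_VALUES,
# this time splitting an s64 scalar.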
---
name: variadic_defs_unmerge_scalar
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: variadic_defs_unmerge_scalar
  ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
  ; CHECK-NEXT: [[UV0:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](s64)
  ; CHECK-NEXT: [[ANYEXT0:%[0-9]+]]:_(s32) = G_ANYEXT [[UV0]](s16)
  ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
  ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
  ; CHECK-NEXT: [[ADD0:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT0]], [[ANYEXT1]]
  ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT2]], [[ANYEXT3]]
  ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
  ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $w0
  bb.0:
    %0 :_(s64) = COPY $d0
    %1 :_(s16), %2 :_(s16), %3 :_(s16), %4 :_(s16) = G_UNMERGE_VALUES %0(s64)
    %5 :_(s16), %6 :_(s16), %7 :_(s16), %8 :_(s16) = G_UNMERGE_VALUES %0(s64)
    %9 :_(s16), %10:_(s16), %11:_(s16), %12:_(s16) = G_UNMERGE_VALUES %0(s64)
    %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16) = G_UNMERGE_VALUES %0(s64)
    %17:_(s32) = G_ANYEXT %1 (s16)
    %18:_(s32) = G_ANYEXT %6 (s16)
    %19:_(s32) = G_ANYEXT %11(s16)
    %20:_(s32) = G_ANYEXT %16(s16)
    %21:_(s32) = G_ADD %17, %18
    %22:_(s32) = G_ADD %19, %20
    %23:_(s32) = G_ADD %21, %22
    $w0 = COPY %23(s32)
    RET_ReallyLR implicit $w0
...
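# variadic_defs_unmerge_scalar_asym: unmerges with different result shapes
# (four s16s vs. two s32s) must stay separate, but the duplicates of each shape
# should still collapse to one instruction apiece.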
---
name: variadic_defs_unmerge_scalar_asym
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: variadic_defs_unmerge_scalar_asym
  ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
  ; CHECK-NEXT: [[UV0:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](s64)
  ; CHECK-NEXT: [[UV01:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
  ; CHECK-NEXT: [[ANYEXT0:%[0-9]+]]:_(s32) = G_ANYEXT [[UV0]](s16)
  ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
  ; CHECK-NEXT: [[ADD0:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT0]], [[ANYEXT1]]
  ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV01]], [[UV23]]
  ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
  ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $w0
  bb.0:
    %0 :_(s64) = COPY $d0
    %1 :_(s16), %2 :_(s16), %3 :_(s16), %4 :_(s16) = G_UNMERGE_VALUES %0(s64)
    %9 :_(s32), %10:_(s32) = G_UNMERGE_VALUES %0(s64)
    %5 :_(s16), %6 :_(s16), %7 :_(s16), %8 :_(s16) = G_UNMERGE_VALUES %0(s64)
    %11:_(s32), %12:_(s32) = G_UNMERGE_VALUES %0(s64)
    %17:_(s32) = G_ANYEXT %1 (s16)
    %18:_(s32) = G_ANYEXT %6 (s16)
    %21:_(s32) = G_ADD %17, %18
    %22:_(s32) = G_ADD %9, %12
    %23:_(s32) = G_ADD %21, %22
    $w0 = COPY %23(s32)
    RET_ReallyLR implicit $w0
...
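# variadic_defs_unmerge_vector_constraints_mix: duplicated unmerges whose defs
# carry different register constraints should merge into a single
# G_UNMERGE_VALUES whose defs pick up the gpr and gpr32 constraints required by
# their users.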
---
name: variadic_defs_unmerge_vector_constraints_mix
legalized: true
regBankSelected: false
selected: false
body: |
  ; CHECK-LABEL: name: variadic_defs_unmerge_vector_constraints_mix
  ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
  ; CHECK-NEXT: [[UV0:%[0-9]+]]:gpr(s32), [[UV1:%[0-9]+]]:gpr(s32), [[UV2:%[0-9]+]]:gpr32(s32), [[UV3:%[0-9]+]]:gpr32(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
  ; CHECK-NEXT: [[ADD0:%[0-9]+]]:_(s32) = G_ADD [[UV0]], [[UV1]]
  ; CHECK-NEXT: [[ADD1:%[0-9]+]]:gpr32(s32) = ADDWrr [[UV2]](s32), [[UV3]](s32)
  ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD0]], [[ADD1]]
  ; CHECK-NEXT: $w0 = COPY [[ADD2]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $w0
  bb.0:
    %0 :_(<4 x s32>) = COPY $q0
    %1 :_(s32), %2 : _ (s32), %3 :_(s32), %4 : _ (s32) = G_UNMERGE_VALUES %0(<4 x s32>)
    %5 :_(s32), %6 :gpr(s32), %7 :_(s32), %8 : _ (s32) = G_UNMERGE_VALUES %0(<4 x s32>)
    %9 :_(s32), %10: _ (s32), %11:_(s32), %12: _ (s32) = G_UNMERGE_VALUES %0(<4 x s32>)
    %13:_(s32), %14: _ (s32), %15:_(s32), %16:gpr32(s32) = G_UNMERGE_VALUES %0(<4 x s32>)
    %21:gpr(s32) = COPY %1(s32)
    %17:_(s32) = G_ADD %21, %6
    %18:gpr32 = COPY %11(s32)
    %19:gpr32(s32) = ADDWrr %18, %16
    %20:_(s32) = G_ADD %17, %19
    $w0 = COPY %20(s32)
    RET_ReallyLR implicit $w0
...