Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-02-01 05:01:59 +01:00), commit 8eb83e55cc.

Commit message: We sometimes get poor code size because constants of types < 32 bits are legalized as 32-bit G_CONSTANTs with a truncate to fit. This works, but means that the localizer can no longer sink them (although it's possible to extend it to do so). On AArch64, however, s8 and s16 constants can be selected in the same way as s32 constants, with a mov pseudo into a W register. If we make s8 and s16 constants legal, then we can avoid unnecessary truncates, they can be CSE'd, and the localizer can sink them as normal. There is a caveat: if the user of a smaller constant has to widen the sources, we end up with an anyext of the smaller-typed G_CONSTANT. This can cause regressions because of the additional extend and missed pattern matching. To remedy this, there's a new artifact combiner to generate the wider G_CONSTANT if it's legal for the target.

Differential Revision: https://reviews.llvm.org/D63587
llvm-svn: 364075

File: 92 lines, 3.2 KiB, YAML (LLVM MIR test).
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s
#
# Legalizer test for G_ATOMIC_CMPXCHG at s8/s16/s32/s64 on AArch64 (+lse).
# The embedded IR module below only provides function declarations so the
# MIR bodies can reference %ir.addr in their memory operands.
--- |
  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"

  define void @cmpxchg_i8(i8* %addr) { ret void }
  define void @cmpxchg_i16(i16* %addr) { ret void }
  define void @cmpxchg_i32(i32* %addr) { ret void }
  define void @cmpxchg_i64(i64* %addr) { ret void }
...
---
# s8 cmpxchg: constants and the cmpxchg itself stay at s8 (legal on AArch64);
# only the final use is widened via G_ANYEXT to fit $w0.
name:            cmpxchg_i8
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: cmpxchg_i8
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
    ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s8) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 1 on %ir.addr)
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMIC_CMPXCHG]](s8)
    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 0
    %2:_(s8) = G_CONSTANT i8 1
    %3:_(s8) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 1 on %ir.addr)
    %4:_(s32) = G_ANYEXT %3
    $w0 = COPY %4(s32)
...
---
# s16 cmpxchg: same shape as the s8 case — s16 is legal, so no truncation
# artifacts are introduced; the result is anyext'd to s32 for $w0.
name:            cmpxchg_i16
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: cmpxchg_i16
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s16) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 2 on %ir.addr)
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMIC_CMPXCHG]](s16)
    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s16) = G_CONSTANT i16 0
    %2:_(s16) = G_CONSTANT i16 1
    %3:_(s16) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 2 on %ir.addr)
    %4:_(s32) = G_ANYEXT %3
    $w0 = COPY %4(s32)
...
---
# s32 cmpxchg: natively register-sized, so no extension is needed — the
# result is copied straight into $w0.
name:            cmpxchg_i32
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: cmpxchg_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 4 on %ir.addr)
    ; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s32) = G_CONSTANT i32 1
    %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 4 on %ir.addr)
    $w0 = COPY %3(s32)
...
---
# s64 cmpxchg: 64-bit case, result copied into $x0; legal as-is.
name:            cmpxchg_i64
body:             |
  bb.0:
    liveins: $x0

    ; CHECK-LABEL: name: cmpxchg_i64
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 8 on %ir.addr)
    ; CHECK: $x0 = COPY [[ATOMIC_CMPXCHG]](s64)
    %0:_(p0) = COPY $x0
    %1:_(s64) = G_CONSTANT i64 0
    %2:_(s64) = G_CONSTANT i64 1
    %3:_(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
    $x0 = COPY %3(s64)
...