commit ffda0633e2

We didn't have selector support for these.

Selection code is similar to `getAArch64XALUOOp` in AArch64ISelLowering. Similar to that code, this returns the AArch64CC and the instruction produced. In SDAG, this is used to optimize select + overflow and conditional branch + overflow pairs. (See `AArch64TargetLowering::LowerBR_CC` and `AArch64TargetLowering::LowerSelect`.)

(G_USUBO should be easy to add here, but it isn't legalized right now.)

This also factors out the existing G_UADDO selection code and removes an unnecessary check for s32/s64. AFAIK, we shouldn't ever get anything other than s32/s64, so it makes more sense for this to be handled by the type assertion in `emitAddSub`.

Differential Revision: https://reviews.llvm.org/D92610
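To make the shape of that factoring concrete, here is a minimal hedged sketch: the helper name emitOverflowOp, the emitADDS helper, and the exact signatures are assumptions for illustration, not a verbatim excerpt of the patch. The idea, mirroring getAArch64XALUOOp, is that the helper emits the flag-setting instruction and returns it together with the AArch64CC condition that tests the overflow bit.

// Sketch only; inside the AArch64 instruction selector these would come
// from the target's own headers.
#include "Utils/AArch64BaseInfo.h"                    // AArch64CC::CondCode (assumed path)
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

// Emit the flag-setting op for a G_[S|U]ADDO and return the instruction
// plus the condition code that reads the overflow bit out of NZCV.
std::pair<MachineInstr *, AArch64CC::CondCode>
emitOverflowOp(unsigned Opcode, Register Dst, MachineOperand &LHS,
               MachineOperand &RHS, MachineIRBuilder &MIRBuilder) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_SADDO:
    // Signed overflow of an ADDS is the V flag.
    return {emitADDS(Dst, LHS, RHS, MIRBuilder), AArch64CC::VS};
  case TargetOpcode::G_UADDO:
    // Unsigned overflow of an ADDS is the carry-out, i.e. the C flag (HS).
    return {emitADDS(Dst, LHS, RHS, MIRBuilder), AArch64CC::HS};
  }
}

A consumer can then materialize the returned condition as a boolean with CSINC, or feed it straight to a conditional select or branch; the CSINC form is what the CHECK lines in the test below exercise.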
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -verify-machineinstrs -mtriple aarch64-unknown-unknown -global-isel -run-pass=instruction-select %s -o - | FileCheck %s
...
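# G_SADDO is the generic opcode produced for llvm.sadd.with.overflow: it
# defines both the sum and an s1 overflow flag. Each test below returns only
# the sum, but selection still emits the CSINC that defines the flag, which
# is what the CHECK lines verify.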
---
name:            saddo_s32
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2

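    ; Check that G_SADDO selects a flag-setting ADDSWrr, with the overflow
    ; bit materialized by CSINCWr. Condition code 7 is AArch64CC::VC, so the
    ; CSINC (which increments on the false path) produces 1 iff V is set.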
    ; CHECK-LABEL: name: saddo_s32
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %reg0:gpr32 = COPY $w0
    ; CHECK: %reg1:gpr32 = COPY $w1
    ; CHECK: %saddo:gpr32 = ADDSWrr %reg0, %reg1, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $w0 = COPY %saddo
    ; CHECK: RET_ReallyLR implicit $w0
    %reg0:gpr(s32) = COPY $w0
    %reg1:gpr(s32) = COPY $w1
    %saddo:gpr(s32), %4:gpr(s1) = G_SADDO %reg0, %reg1
    $w0 = COPY %saddo(s32)
    RET_ReallyLR implicit $w0

...
---
name:            saddo_s64
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $x0, $x1, $x2

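    ; Same pattern for s64: ADDSXrr sets NZCV, and the overflow bit is still
    ; materialized into a 32-bit register by CSINCWr.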
    ; CHECK-LABEL: name: saddo_s64
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: %reg0:gpr64 = COPY $x0
    ; CHECK: %reg1:gpr64 = COPY $x1
    ; CHECK: %saddo:gpr64 = ADDSXrr %reg0, %reg1, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $x0 = COPY %saddo
    ; CHECK: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s64) = COPY $x1
    %saddo:gpr(s64), %4:gpr(s1) = G_SADDO %reg0, %reg1
    $x0 = COPY %saddo(s64)
    RET_ReallyLR implicit $x0

...
---
name:            saddo_s32_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWri when we can fold in a constant.
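    ; (The add-immediate form takes a 12-bit unsigned immediate with an
    ; optional left-shift by 12; the trailing 0 below is that shift field.)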
    ;
    ; CHECK-LABEL: name: saddo_s32_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %copy:gpr32sp = COPY $w0
    ; CHECK: %saddo:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $w0 = COPY %saddo
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 16
    %saddo:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
    $w0 = COPY %saddo(s32)
    RET_ReallyLR implicit $w0

...
---
name:            saddo_s32_shifted
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get ADDSWrs when we can fold in a shift.
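    ; (The G_SHL is folded into the shifted-register operand; the third
    ; operand 16 below encodes the shift, i.e. LSL #16.)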
    ;
    ; CHECK-LABEL: name: saddo_s32_shifted
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %reg0:gpr32 = COPY $w0
    ; CHECK: %reg1:gpr32 = COPY $w1
    ; CHECK: %add:gpr32 = ADDSWrs %reg0, %reg1, 16, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $w0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $w0
    %reg0:gpr(s32) = COPY $w0
    %reg1:gpr(s32) = COPY $w1
    %constant:gpr(s32) = G_CONSTANT i32 16
    %shift:gpr(s32) = G_SHL %reg1(s32), %constant(s32)
    %add:gpr(s32), %overflow:gpr(s1) = G_SADDO %reg0, %shift
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            saddo_s32_neg_imm
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $w1, $x2
    ; Check that we get SUBSWri when we can fold in a negative constant.
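    ; (Adding -16 is selected as SUBSWri with +16; the V flag still reflects
    ; signed overflow of the original addition.)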
    ;
    ; CHECK-LABEL: name: saddo_s32_neg_imm
    ; CHECK: liveins: $w0, $w1, $x2
    ; CHECK: %copy:gpr32sp = COPY $w0
    ; CHECK: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
    ; CHECK: %overflow:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $w0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:gpr(s32) = COPY $w0
    %constant:gpr(s32) = G_CONSTANT i32 -16
    %add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
    $w0 = COPY %add(s32)
    RET_ReallyLR implicit $w0

...
---
name:            saddo_arith_extended
alignment:       4
legalized:       true
regBankSelected: true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $w0, $x0
    ; Check that we get ADDSXrx.
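    ; (The G_ZEXT + G_SHL pair folds into an arithmetic extended-register
    ; operand; 18 below should decode as UXTW #2.)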
    ; CHECK-LABEL: name: saddo_arith_extended
    ; CHECK: liveins: $w0, $x0
    ; CHECK: %reg0:gpr64sp = COPY $x0
    ; CHECK: %reg1:gpr32 = COPY $w0
    ; CHECK: %add:gpr64 = ADDSXrx %reg0, %reg1, 18, implicit-def $nzcv
    ; CHECK: %flags:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
    ; CHECK: $x0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $x0
    %reg0:gpr(s64) = COPY $x0
    %reg1:gpr(s32) = COPY $w0
    %ext:gpr(s64) = G_ZEXT %reg1(s32)
    %cst:gpr(s64) = G_CONSTANT i64 2
    %shift:gpr(s64) = G_SHL %ext, %cst(s64)
    %add:gpr(s64), %flags:gpr(s1) = G_SADDO %reg0, %shift
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0

...