mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 11:13:28 +01:00
9f66584696
This patch teaches the AsmParser to accept add/adds/sub/subs/cmp/cmn with a negative immediate operand and convert them as shown:
    add  Rd, Rn, -imm -> sub  Rd, Rn, imm
    sub  Rd, Rn, -imm -> add  Rd, Rn, imm
    adds Rd, Rn, -imm -> subs Rd, Rn, imm
    subs Rd, Rn, -imm -> adds Rd, Rn, imm
    cmp  Rn, -imm     -> cmn  Rn, imm
    cmn  Rn, -imm     -> cmp  Rn, imm
These instructions are an alternate syntax available to assembly coders, and are needed in order to support code already compiling with some other assemblers (gas). They are documented in the "ARMv8 Instruction Set Overview", in the "Arithmetic (immediate)" section. This makes llvm-mc a programmer-friendly assembler! This also fixes PR20978: "Assembly handling of adding negative numbers not as smart as gas".
llvm-svn: 241166
95 lines
2.6 KiB
ArmAsm
// RUN: llvm-mc -triple=aarch64-none-linux-gnu < %s | FileCheck %s

// Section: "add Rd, Rn, #-imm" must be accepted and printed back as the
// canonical "sub Rd, Rn, #imm" (the plain sub form must also round-trip).
// Covers: shifted 32-bit, shifted 64-bit, small immediate, the maximum
// 12-bit immediate (4095), and the zero-immediate boundary.

// CHECK: sub w0, w2, #2, lsl #12
// CHECK: sub w0, w2, #2, lsl #12
        sub w0, w2, #2, lsl 12
        add w0, w2, #-2, lsl 12
// CHECK: sub x1, x3, #2, lsl #12
// CHECK: sub x1, x3, #2, lsl #12
        sub x1, x3, #2, lsl 12
        add x1, x3, #-2, lsl 12
// CHECK: sub x1, x3, #4
// CHECK: sub x1, x3, #4
        sub x1, x3, #4
        add x1, x3, #-4
// CHECK: sub x1, x3, #4095
// CHECK: sub x1, x3, #4095
        sub x1, x3, #4095, lsl 0
        add x1, x3, #-4095, lsl 0
// CHECK: sub x3, x4, #0
        sub x3, x4, #0
// Section: "sub Rd, Rn, #-imm" must be accepted and printed back as the
// canonical "add Rd, Rn, #imm" (the plain add form must also round-trip).
// Covers: shifted 32-bit, shifted 64-bit, small immediate, the maximum
// 12-bit immediate (4095), and the zero-immediate boundary.

// CHECK: add w0, w2, #2, lsl #12
// CHECK: add w0, w2, #2, lsl #12
        add w0, w2, #2, lsl 12
        sub w0, w2, #-2, lsl 12
// CHECK: add x1, x3, #2, lsl #12
// CHECK: add x1, x3, #2, lsl #12
        add x1, x3, #2, lsl 12
        sub x1, x3, #-2, lsl 12
// CHECK: add x1, x3, #4
// CHECK: add x1, x3, #4
        add x1, x3, #4
        sub x1, x3, #-4
// CHECK: add x1, x3, #4095
// CHECK: add x1, x3, #4095
        add x1, x3, #4095, lsl 0
        sub x1, x3, #-4095, lsl 0
// CHECK: add x2, x5, #0
        add x2, x5, #0
// Section: flag-setting variant — "adds Rd, Rn, #-imm" must be accepted and
// printed back as the canonical "subs Rd, Rn, #imm".
// Covers the same immediate shapes as the non-flag-setting section above.

// CHECK: subs w0, w2, #2, lsl #12
// CHECK: subs w0, w2, #2, lsl #12
        subs w0, w2, #2, lsl 12
        adds w0, w2, #-2, lsl 12
// CHECK: subs x1, x3, #2, lsl #12
// CHECK: subs x1, x3, #2, lsl #12
        subs x1, x3, #2, lsl 12
        adds x1, x3, #-2, lsl 12
// CHECK: subs x1, x3, #4
// CHECK: subs x1, x3, #4
        subs x1, x3, #4
        adds x1, x3, #-4
// CHECK: subs x1, x3, #4095
// CHECK: subs x1, x3, #4095
        subs x1, x3, #4095, lsl 0
        adds x1, x3, #-4095, lsl 0
// CHECK: subs x3, x4, #0
        subs x3, x4, #0
// Section: flag-setting variant — "subs Rd, Rn, #-imm" must be accepted and
// printed back as the canonical "adds Rd, Rn, #imm".
// Covers the same immediate shapes as the non-flag-setting section above.

// CHECK: adds w0, w2, #2, lsl #12
// CHECK: adds w0, w2, #2, lsl #12
        adds w0, w2, #2, lsl 12
        subs w0, w2, #-2, lsl 12
// CHECK: adds x1, x3, #2, lsl #12
// CHECK: adds x1, x3, #2, lsl #12
        adds x1, x3, #2, lsl 12
        subs x1, x3, #-2, lsl 12
// CHECK: adds x1, x3, #4
// CHECK: adds x1, x3, #4
        adds x1, x3, #4
        subs x1, x3, #-4
// CHECK: adds x1, x3, #4095
// CHECK: adds x1, x3, #4095
        adds x1, x3, #4095, lsl 0
        subs x1, x3, #-4095, lsl 0
// CHECK: adds x2, x5, #0
        adds x2, x5, #0
// Section: compare aliases — "cmp Rn, #-imm" must be accepted as
// "cmn Rn, #imm" and vice versa. cmp/cmn are themselves aliases of
// subs/adds with the zero-register destination, so the printer may emit
// either spelling; the FileCheck regex accepts both.

// CHECK: {{adds xzr,|cmn}} x5, #5
// CHECK: {{adds xzr,|cmn}} x5, #5
        cmn x5, #5
        cmp x5, #-5
// CHECK: {{subs xzr,|cmp}} x6, #4095
// CHECK: {{subs xzr,|cmp}} x6, #4095
        cmp x6, #4095
        cmn x6, #-4095
// CHECK: {{adds wzr,|cmn}} w7, #5
// CHECK: {{adds wzr,|cmn}} w7, #5
        cmn w7, #5
        cmp w7, #-5
// CHECK: {{subs wzr,|cmp}} w8, #4095
// CHECK: {{subs wzr,|cmp}} w8, #4095
        cmp w8, #4095
        cmn w8, #-4095