1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-23 03:02:36 +01:00

[AVR] Add a selection of CodeGen tests

Summary: This adds all of the CodeGen tests which currently pass.

Reviewers: arsenm, kparzysz

Subscribers: japaric, wdng

Differential Revision: https://reviews.llvm.org/D26388

llvm-svn: 286418
This commit is contained in:
Dylan McKay 2016-11-09 23:46:52 +00:00
parent 6faa0c3ed4
commit 391ac08575
19 changed files with 556 additions and 0 deletions

View File

@ -0,0 +1,16 @@
; RUN: llc -mattr=avr6 < %s -march=avr | FileCheck %s
; Checks that 32-bit atomic operations are expanded into __sync_* libcalls
; (the CHECK lines below pin the expected runtime-library call).
; CHECK-LABEL: atomic_load32
; CHECK: call __sync_val_compare_and_swap_4
define i32 @atomic_load32(i32* %foo) {
%val = load atomic i32, i32* %foo unordered, align 4
ret i32 %val
}
; A 32-bit atomic read-modify-write (sub) is likewise lowered to a libcall.
; CHECK-LABEL: atomic_load_sub32
; CHECK: call __sync_fetch_and_sub_4
define i32 @atomic_load_sub32(i32* %foo) {
%val = atomicrmw sub i32* %foo, i32 13 seq_cst
ret i32 %val
}

View File

@ -0,0 +1,16 @@
; RUN: llc -mattr=avr6 < %s -march=avr | FileCheck %s
; Checks that 64-bit atomic operations are expanded into __sync_*_8 libcalls.
; CHECK-LABEL: atomic_load64
; CHECK: call __sync_val_compare_and_swap_8
define i64 @atomic_load64(i64* %foo) {
%val = load atomic i64, i64* %foo unordered, align 8
ret i64 %val
}
; A 64-bit atomic read-modify-write (sub) is likewise lowered to a libcall.
; CHECK-LABEL: atomic_load_sub64
; CHECK: call __sync_fetch_and_sub_8
define i64 @atomic_load_sub64(i64* %foo) {
%val = atomicrmw sub i64* %foo, i64 13 seq_cst
ret i64 %val
}

View File

@ -0,0 +1,30 @@
; RUN: llc -mattr=avr6 < %s -march=avr | FileCheck %s
; Checks that atomic exchange (atomicrmw xchg) of every integer width
; is lowered to the matching __sync_lock_test_and_set_N libcall.
; CHECK-LABEL: atomic_swap8
; CHECK: call __sync_lock_test_and_set_1
define i8 @atomic_swap8(i8* %foo) {
%val = atomicrmw xchg i8* %foo, i8 13 seq_cst
ret i8 %val
}
; CHECK-LABEL: atomic_swap16
; CHECK: call __sync_lock_test_and_set_2
define i16 @atomic_swap16(i16* %foo) {
%val = atomicrmw xchg i16* %foo, i16 13 seq_cst
ret i16 %val
}
; CHECK-LABEL: atomic_swap32
; CHECK: call __sync_lock_test_and_set_4
define i32 @atomic_swap32(i32* %foo) {
%val = atomicrmw xchg i32* %foo, i32 13 seq_cst
ret i32 %val
}
; CHECK-LABEL: atomic_swap64
; CHECK: call __sync_lock_test_and_set_8
define i64 @atomic_swap64(i64* %foo) {
%val = atomicrmw xchg i64* %foo, i64 13 seq_cst
ret i64 %val
}

30
test/CodeGen/AVR/ctpop.ll Normal file
View File

@ -0,0 +1,30 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that @llvm.ctpop.i8 is expanded inline into the bit-parallel
; population-count sequence (masks 85 = 0x55, 51 = 0x33, 15 = 0x0F)
; rather than a libcall.
define i8 @count_population(i8) unnamed_addr {
entry-block:
%1 = tail call i8 @llvm.ctpop.i8(i8 %0)
ret i8 %1
}
declare i8 @llvm.ctpop.i8(i8)
; CHECK-LABEL: count_population:
; CHECK: mov [[SCRATCH:r[0-9]+]], [[RESULT:r[0-9]+]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: andi {{.*}}[[SCRATCH]], 85
; CHECK: sub {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: mov {{.*}}[[SCRATCH]], {{.*}}[[RESULT]]
; CHECK: andi {{.*}}[[SCRATCH]], 51
; CHECK: lsr {{.*}}[[RESULT]]
; CHECK: lsr {{.*}}[[RESULT]]
; CHECK: andi {{.*}}[[RESULT]], 51
; CHECK: add {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: mov {{.*}}[[SCRATCH]], {{.*}}[[RESULT]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: lsr {{.*}}[[SCRATCH]]
; CHECK: add {{.*}}[[SCRATCH]], {{.*}}[[RESULT]]
; CHECK: andi {{.*}}[[SCRATCH]], 15
; CHECK: mov {{.*}}[[RESULT]], {{.*}}[[SCRATCH]]
; CHECK: ret

64
test/CodeGen/AVR/div.ll Normal file
View File

@ -0,0 +1,64 @@
; RUN: llc -mattr=mul,movw < %s -march=avr | FileCheck %s
; Unsigned 8-bit division
; Checks that i8 udiv is lowered to the __udivmodqi4 runtime call.
; NOTE: the label previously read `div8:`, which only matched `udiv8:` by
; substring accident; it is fixed to match the function name, consistent
; with every other CHECK-LABEL in this file.
define i8 @udiv8(i8 %a, i8 %b) {
; CHECK-LABEL: udiv8:
; CHECK: call __udivmodqi4
; CHECK-NEXT: ret
%quotient = udiv i8 %a, %b
ret i8 %quotient
}
; Signed 8-bit division
define i8 @sdiv8(i8 %a, i8 %b) {
; CHECK-LABEL: sdiv8:
; CHECK: call __divmodqi4
; CHECK-NEXT: ret
%quotient = sdiv i8 %a, %b
ret i8 %quotient
}
; Unsigned 16-bit division
; The movw copies the quotient into the i16 return registers r25:r24.
define i16 @udiv16(i16 %a, i16 %b) {
; CHECK-LABEL: udiv16:
; CHECK: call __udivmodhi4
; CHECK-NEXT: movw r24, r22
; CHECK-NEXT: ret
%quot = udiv i16 %a, %b
ret i16 %quot
}
; Signed 16-bit division
define i16 @sdiv16(i16 %a, i16 %b) {
; CHECK-LABEL: sdiv16:
; CHECK: call __divmodhi4
; CHECK-NEXT: movw r24, r22
; CHECK-NEXT: ret
%quot = sdiv i16 %a, %b
ret i16 %quot
}
; Unsigned 32-bit division
; Two movw instructions copy the 32-bit quotient into r25:r22.
define i32 @udiv32(i32 %a, i32 %b) {
; CHECK-LABEL: udiv32:
; CHECK: call __udivmodsi4
; CHECK-NEXT: movw r22, r18
; CHECK-NEXT: movw r24, r20
; CHECK-NEXT: ret
%quot = udiv i32 %a, %b
ret i32 %quot
}
; Signed 32-bit division
define i32 @sdiv32(i32 %a, i32 %b) {
; CHECK-LABEL: sdiv32:
; CHECK: call __divmodsi4
; CHECK-NEXT: movw r22, r18
; CHECK-NEXT: movw r24, r20
; CHECK-NEXT: ret
%quot = sdiv i32 %a, %b
ret i32 %quot
}

View File

@ -0,0 +1,9 @@
; RUN: llc -mattr=avrtiny -O0 < %s -march=avr | FileCheck %s
; On avrtiny a 16-bit register copy is done with two single-byte `mov`
; instructions (compare with the avr25 test, which checks a single `movw`).
define i16 @reg_copy16(i16 %a) {
; CHECK-LABEL: reg_copy16
; CHECK: mov r18, r24
; CHECK: mov r19, r25
ret i16 %a
}

View File

@ -0,0 +1,8 @@
; RUN: llc -mattr=avr25 -O0 < %s -march=avr | FileCheck %s
; On most cores, the 16-bit 'MOVW' instruction can be used
; (a single movw instead of two byte-wise movs).
define i16 @reg_copy16(i16 %a) {
; CHECK-LABEL: reg_copy16
; CHECK: movw r18, r24
ret i16 %a
}

View File

@ -0,0 +1,27 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Test case for an assertion error.
;
; Error:
; ```
; Impossible reg-to-reg copy
; UNREACHABLE executed at lib/Target/AVR/AVRInstrInfo.cpp
; ```
;
; This no longer occurs.
declare { i16, i1 } @llvm.umul.with.overflow.i16(i16, i16)
; Regression test: only checks that the function compiles at all
; (CHECK-LABEL with no instruction checks).
; CHECK-LABEL: foo
define void @foo() {
entry-block:
%0 = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 undef, i16 undef)
%1 = extractvalue { i16, i1 } %0, 1
%2 = icmp eq i1 %1, true
br i1 %2, label %cond, label %next
next: ; preds = %entry-block
ret void
cond: ; preds = %entry-block
unreachable
}

View File

@ -0,0 +1,8 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that inline assembly using the AVR 'Z' register-pair constraint
; compiles successfully (label check only, no instruction checks).
; CHECK-LABEL: foo
define void @foo(i16 %a) {
call void asm sideeffect "add $0, $0", "Z"(i16 %a) nounwind
ret void
}

View File

@ -0,0 +1,10 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that @llvm.bswap.i16 selects successfully
; (label check only, no instruction checks).
declare i16 @llvm.bswap.i16(i16)
define i16 @foo(i16) {
; CHECK-LABEL: foo:
entry-block:
%1 = tail call i16 @llvm.bswap.i16(i16 %0)
ret i16 %1
}

View File

@ -0,0 +1,7 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that a function taking an i1 argument lowers and returns cleanly.
define void @foo(i1) {
; CHECK-LABEL: foo:
; CHECK: ret
ret void
}

28
test/CodeGen/AVR/mul.ll Normal file
View File

@ -0,0 +1,28 @@
; RUN: llc -mattr=mul,movw < %s -march=avr | FileCheck %s
; 8-bit multiply: hardware MULS leaves the product in r1:r0; r1 must be
; cleared afterwards (eor r1, r1) since the ABI treats it as the zero register.
define i8 @mult8(i8 %a, i8 %b) {
; CHECK-LABEL: mult8:
; CHECK: muls r22, r24
; CHECK: eor r1, r1
; CHECK: mov r24, r0
%mul = mul i8 %b, %a
ret i8 %mul
}
; 16-bit multiply expanded into partial 8x8 products.
define i16 @mult16(i16 %a, i16 %b) {
; CHECK-LABEL: mult16:
; CHECK: muls r22, r25
; CHECK: mov r18, r0
; CHECK: mul r22, r24
; CHECK: mov r19, r0
; CHECK: mov r20, r1
; CHECK: eor r1, r1
; CHECK: add r20, r18
; CHECK: muls r23, r24
; CHECK: eor r1, r1
; CHECK: mov r22, r0
; CHECK: add r22, r20
; :TODO: finish after reworking shift instructions
%mul = mul nsw i16 %b, %a
ret i16 %mul
}

8
test/CodeGen/AVR/neg.ll Normal file
View File

@ -0,0 +1,8 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that (sub 0, x) is selected as the single NEG instruction.
define i8 @neg8(i8 %x) {
; CHECK-LABEL: neg8:
; CHECK: neg r24
%sub = sub i8 0, %x
ret i8 %sub
}

View File

@ -0,0 +1,67 @@
; RUN: llc < %s -march=avr -mattr=movw,lpmx | FileCheck %s
; Tests the extended LPM instructions (LPMW, LPM Rd, Z+).
; Loads from addrspace(1) are lowered to LPM-family program-memory reads
; through the Z pointer register pair (r31:r30).
define i8 @test8(i8 addrspace(1)* %p) {
; CHECK-LABEL: test8:
; CHECK: movw r30, r24
; CHECK: lpm r24, Z
%1 = load i8, i8 addrspace(1)* %p
ret i8 %1
}
define i16 @test16(i16 addrspace(1)* %p) {
; CHECK-LABEL: test16:
; CHECK: movw r30, r24
; CHECK: lpmw r24, Z
%1 = load i16, i16 addrspace(1)* %p
ret i16 %1
}
; Loop summing bytes: the post-increment pointer in the loop should select
; the LPM Rd, Z+ post-increment form.
define i8 @test8postinc(i8 addrspace(1)* %x, i8 %y) {
; CHECK-LABEL: test8postinc:
; CHECK: movw r30, r24
; CHECK: lpm {{.*}}, Z+
entry:
%cmp10 = icmp sgt i8 %y, 0
br i1 %cmp10, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
%ret.013 = phi i8 [ %add, %for.body ], [ 0, %entry ]
%i.012 = phi i8 [ %inc, %for.body ], [ 0, %entry ]
%x.addr.011 = phi i8 addrspace(1)* [ %incdec.ptr, %for.body ], [ %x, %entry ]
%incdec.ptr = getelementptr inbounds i8, i8 addrspace(1)* %x.addr.011, i16 1
%0 = load i8, i8 addrspace(1)* %x.addr.011
%add = add i8 %0, %ret.013
%inc = add i8 %i.012, 1
%exitcond = icmp eq i8 %inc, %y
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body, %entry
%ret.0.lcssa = phi i8 [ 0, %entry ], [ %add, %for.body ]
ret i8 %ret.0.lcssa
}
; Same loop shape with i16 elements: should select LPMW {{.*}}, Z+.
define i16 @test16postinc(i16 addrspace(1)* %x, i8 %y) {
; CHECK-LABEL: test16postinc:
; CHECK: movw r30, r24
; CHECK: lpmw {{.*}}, Z+
entry:
%cmp5 = icmp sgt i8 %y, 0
br i1 %cmp5, label %for.body, label %for.end
for.body: ; preds = %entry, %for.body
%ret.08 = phi i16 [ %add, %for.body ], [ 0, %entry ]
%i.07 = phi i8 [ %inc, %for.body ], [ 0, %entry ]
%x.addr.06 = phi i16 addrspace(1)* [ %incdec.ptr, %for.body ], [ %x, %entry ]
%incdec.ptr = getelementptr inbounds i16, i16 addrspace(1)* %x.addr.06, i16 1
%0 = load i16, i16 addrspace(1)* %x.addr.06
%add = add nsw i16 %0, %ret.08
%inc = add i8 %i.07, 1
%exitcond = icmp eq i8 %inc, %y
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body, %entry
%ret.0.lcssa = phi i16 [ 0, %entry ], [ %add, %for.body ]
ret i16 %ret.0.lcssa
}

58
test/CodeGen/AVR/rem.ll Normal file
View File

@ -0,0 +1,58 @@
; RUN: llc -mattr=mul,movw < %s -march=avr | FileCheck %s
; Unsigned 8-bit remainder
; Checks that i8 urem is lowered to the __udivmodqi4 runtime call; the
; remainder is moved from r25 into the return register r24.
; NOTE: the label previously read `rem8:` (and the comment said "remision",
; a botched s/div/rem/ of "division"); the label is fixed to match the
; function name, consistent with every other CHECK-LABEL in this file.
define i8 @urem8(i8 %a, i8 %b) {
; CHECK-LABEL: urem8:
; CHECK: call __udivmodqi4
; CHECK-NEXT: mov r24, r25
; CHECK-NEXT: ret
%rem = urem i8 %a, %b
ret i8 %rem
}
; Signed 8-bit remainder
define i8 @srem8(i8 %a, i8 %b) {
; CHECK-LABEL: srem8:
; CHECK: call __divmodqi4
; CHECK-NEXT: mov r24, r25
; CHECK-NEXT: ret
%rem = srem i8 %a, %b
ret i8 %rem
}
; Unsigned 16-bit remainder
define i16 @urem16(i16 %a, i16 %b) {
; CHECK-LABEL: urem16:
; CHECK: call __udivmodhi4
; CHECK-NEXT: ret
%rem = urem i16 %a, %b
ret i16 %rem
}
; Signed 16-bit remainder
define i16 @srem16(i16 %a, i16 %b) {
; CHECK-LABEL: srem16:
; CHECK: call __divmodhi4
; CHECK-NEXT: ret
%rem = srem i16 %a, %b
ret i16 %rem
}
; Unsigned 32-bit remainder
define i32 @urem32(i32 %a, i32 %b) {
; CHECK-LABEL: urem32:
; CHECK: call __udivmodsi4
; CHECK-NEXT: ret
%rem = urem i32 %a, %b
ret i32 %rem
}
; Signed 32-bit remainder
define i32 @srem32(i32 %a, i32 %b) {
; CHECK-LABEL: srem32:
; CHECK: call __divmodsi4
; CHECK-NEXT: ret
%rem = srem i32 %a, %b
ret i32 %rem
}

View File

@ -0,0 +1,23 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that `sin` and `cos` nodes are expanded into calls to
; the `sin` and `cos` runtime library functions.
; On AVR, the only floats supported are 32-bits, and so the
; function names have no `f` or `d` suffix.
declare float @llvm.sin.f32(float %x)
declare float @llvm.cos.f32(float %x)
define float @do_sin(float %a) {
; CHECK-LABEL: do_sin:
; CHECK: {{sin$}}
%result = call float @llvm.sin.f32(float %a)
ret float %result
}
; CHECK-LABEL: do_cos:
; CHECK: {{cos$}}
define float @do_cos(float %a) {
%result = call float @llvm.cos.f32(float %a)
ret float %result
}

View File

@ -0,0 +1,58 @@
; RUN: llc -march=avr -print-after=expand-isel-pseudos < %s 2>&1 | FileCheck %s
; Because `switch` seems to trigger Machine Basic Blocks to be ordered
; in a different order than they were constructed, this exposes an
; error in the `expand-isel-pseudos` pass. Specifically, it thought we
; could always fallthrough to a newly-constructed MBB. However,
; there's no guarantee that either of the constructed MBBs need to
; occur immediately after the currently-focused one!
;
; This issue manifests in a CFG that looks something like this:
;
; BB#2: derived from LLVM BB %finish
; Predecessors according to CFG: BB#0 BB#1
; %vreg0<def> = PHI %vreg3, <BB#0>, %vreg5, <BB#1>
; %vreg7<def> = LDIRdK 2
; %vreg8<def> = LDIRdK 1
; CPRdRr %vreg2, %vreg0, %SREG<imp-def>
; BREQk <BB#6>, %SREG<imp-use>
; Successors according to CFG: BB#5(?%) BB#6(?%)
;
; The code assumes it the fallthrough block after this is BB#5, but
; it's actually BB#3! To be proper, there should be an unconditional
; jump tying this block to BB#5.
define i8 @select_must_add_unconditional_jump(i8 %arg0, i8 %arg1) unnamed_addr {
entry-block:
switch i8 %arg0, label %dead [
i8 0, label %zero
i8 1, label %one
]
zero:
br label %finish
one:
br label %finish
finish:
%predicate = phi i8 [ 50, %zero ], [ 100, %one ]
%is_eq = icmp eq i8 %arg1, %predicate
%result = select i1 %is_eq, i8 1, i8 2
ret i8 %result
dead:
ret i8 0
}
; This check may be a bit brittle, but the important thing is that the
; basic block containing `select` needs to contain explicit jumps to
; both successors.
; (These match the machine-IR dump produced by -print-after above, not
; the final assembly.)
; CHECK: BB#2: derived from LLVM BB %finish
; CHECK: BREQk <[[BRANCHED:BB#[0-9]+]]>
; CHECK: RJMPk <[[DIRECT:BB#[0-9]+]]>
; CHECK: Successors according to CFG
; CHECK-SAME-DAG: {{.*}}[[BRANCHED]]
; CHECK-SAME-DAG: {{.*}}[[DIRECT]]
; CHECK: BB#3: derived from LLVM BB

View File

@ -0,0 +1,71 @@
; RUN: llc -march=avr < %s | FileCheck %s
; Checks that sign extension between every pair of integer widths
; selects successfully (CHECK-LABEL only; no instruction checks).
define i8 @sign_extended_1_to_8(i1) {
; CHECK-LABEL: sign_extended_1_to_8
entry-block:
%1 = sext i1 %0 to i8
ret i8 %1
}
define i16 @sign_extended_1_to_16(i1) {
; CHECK-LABEL: sign_extended_1_to_16
entry-block:
%1 = sext i1 %0 to i16
ret i16 %1
}
define i16 @sign_extended_8_to_16(i8) {
; CHECK-LABEL: sign_extended_8_to_16
entry-block:
%1 = sext i8 %0 to i16
ret i16 %1
}
define i32 @sign_extended_1_to_32(i1) {
; CHECK-LABEL: sign_extended_1_to_32
entry-block:
%1 = sext i1 %0 to i32
ret i32 %1
}
define i32 @sign_extended_8_to_32(i8) {
; CHECK-LABEL: sign_extended_8_to_32
entry-block:
%1 = sext i8 %0 to i32
ret i32 %1
}
define i32 @sign_extended_16_to_32(i16) {
; CHECK-LABEL: sign_extended_16_to_32
entry-block:
%1 = sext i16 %0 to i32
ret i32 %1
}
define i64 @sign_extended_1_to_64(i1) {
; CHECK-LABEL: sign_extended_1_to_64
entry-block:
%1 = sext i1 %0 to i64
ret i64 %1
}
define i64 @sign_extended_8_to_64(i8) {
; CHECK-LABEL: sign_extended_8_to_64
entry-block:
%1 = sext i8 %0 to i64
ret i64 %1
}
define i64 @sign_extended_16_to_64(i16) {
; CHECK-LABEL: sign_extended_16_to_64
entry-block:
%1 = sext i16 %0 to i64
ret i64 %1
}
define i64 @sign_extended_32_to_64(i32) {
; CHECK-LABEL: sign_extended_32_to_64
entry-block:
%1 = sext i32 %0 to i64
ret i64 %1
}

18
test/CodeGen/AVR/trunc.ll Normal file
View File

@ -0,0 +1,18 @@
; RUN: llc < %s -march=avr | FileCheck %s
; Checks that truncating i16 -> i8 selects a single register move:
; the low byte (r22) for a plain trunc, the high byte (r23) when the
; value was first shifted right by 8.
define i8 @trunc8_loreg(i16 %x, i16 %y) {
; CHECK-LABEL: trunc8_loreg:
; CHECK: mov r24, r22
; CHECK-NEXT: ret
%conv = trunc i16 %y to i8
ret i8 %conv
}
define i8 @trunc8_hireg(i16 %x, i16 %y) {
; CHECK-LABEL: trunc8_hireg:
; CHECK: mov r24, r23
; CHECK-NEXT: ret
%shr1 = lshr i16 %y, 8
%conv = trunc i16 %shr1 to i8
ret i8 %conv
}