Commit 87db53ff7a (llvm-svn: 192282):
Substantial SelectionDAG scheduling is going away soon, and is interfering with
Hao's attempts to implement LDn/STn instructions, so I say we make the leap
first. There were a few reorderings (inevitably) which broke some tests. I
tried to replace them with CHECK-DAG variants mostly, but some were too complex
for that to be useful and I just reordered them.

; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
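; This file checks AArch64 conditional-select lowering: plain selects become
; csel, selects of an incremented/inverted/negated operand become
; csinc/csinv/csneg, and i1 zero/sign extensions use the cset/csetm idioms
; (matched below as csinc/csinv against the zero register).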

@var32 = global i32 0
@var64 = global i64 0

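; A straightforward two-way select should become "csel Rd, Rn, Rm, cond"
; (Rd = cond ? Rn : Rm).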
define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csel:

  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %val1 = select i1 %tst1, i32 42, i32 52
  store i32 %val1, i32* @var32
; CHECK-DAG: movz [[W52:w[0-9]+]], #52
; CHECK-DAG: movz [[W42:w[0-9]+]], #42
; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi

  %rhs64 = sext i32 %rhs32 to i64
  %tst2 = icmp sle i64 %lhs64, %rhs64
  %val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
  store i64 %val2, i64* @var64
; CHECK-DAG: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
; CHECK-DAG: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le

  ret void
; CHECK: ret
}

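; The "one" and "ueq" floating-point predicates have no single AArch64
; condition code, so each select below needs a pair of chained csel
; instructions.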
define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %rhs64) {
; CHECK-LABEL: test_floatcsel:

  %tst1 = fcmp one float %lhs32, %rhs32
; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
  %val1 = select i1 %tst1, i32 42, i32 52
  store i32 %val1, i32* @var32
; CHECK: movz [[W52:w[0-9]+]], #52
; CHECK: movz [[W42:w[0-9]+]], #42
; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt

  %tst2 = fcmp ueq double %lhs64, %rhs64
; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
  %val2 = select i1 %tst2, i64 9, i64 15
  store i64 %val2, i64* @var64
; CHECK: movz [[CONST15:x[0-9]+]], #15
; CHECK: movz [[CONST9:x[0-9]+]], #9
; CHECK: csel [[MAYBETRUE:x[0-9]+]], [[CONST9]], [[CONST15]], eq
; CHECK: csel {{x[0-9]+}}, [[CONST9]], [[MAYBETRUE]], vs

  ret void
; CHECK: ret
}

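; Selecting between a value and another value plus one should become
; "csinc Rd, Rn, Rm, cond" (Rd = cond ? Rn : Rm + 1).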
define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csinc:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = add i32 %rhs32, 1
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinc {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = add i32 %rhs32, 1
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = add i64 %rhs3, 1
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = add i64 %rhs4, 1
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

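; Selecting between a value and the bitwise NOT of another should become
; "csinv Rd, Rn, Rm, cond" (Rd = cond ? Rn : ~Rm).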
define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csinv:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = xor i32 -1, %rhs32
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinv {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = xor i32 -1, %rhs32
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = xor i64 -1, %rhs3
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = xor i64 -1, %rhs4
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

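; Selecting between a value and the negation of another should become
; "csneg Rd, Rn, Rm, cond" (Rd = cond ? Rn : -Rm).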
define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
; CHECK-LABEL: test_csneg:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = sub i32 0, %rhs32
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csneg {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = sub i32 0, %rhs32
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = sub i64 0, %rhs3
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = sub i64 0, %rhs4
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

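; Zero-extending an i1 comparison result materialises 0 or 1, matching the
; cset alias: csinc Rd, zr, zr, <inverted cond>.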
define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK-LABEL: test_cset:

; N.b. code is not optimal here (32-bit csinc would be better) but
; incoming DAG is too complex
  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = zext i1 %tst1 to i32
  store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, ne

  %rhs64 = sext i32 %rhs to i64
  %tst2 = icmp ule i64 %lhs64, %rhs64
  %val2 = zext i1 %tst2 to i64
  store i64 %val2, i64* @var64
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, hi

  ret void
; CHECK: ret
}

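; Sign-extending an i1 comparison result materialises 0 or -1, matching the
; csetm alias: csinv Rd, zr, zr, <inverted cond>.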
define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK-LABEL: test_csetm:

  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = sext i1 %tst1 to i32
  store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, wzr, wzr, ne

  %rhs64 = sext i32 %rhs to i64
  %tst2 = icmp ule i64 %lhs64, %rhs64
  %val2 = sext i1 %tst2 to i64
  store i64 %val2, i64* @var64
; CHECK: csinv {{x[0-9]+}}, xzr, xzr, hi

  ret void
; CHECK: ret
}