Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-24 03:33:20 +01:00)

Commit 9be7d39fd7: If we know that a particular 64-bit constant has all of its high bits zero, then we can rely on the fact that 32-bit ARM64 instructions automatically zero out the high bits of an x-register. This gives the expansion logic fewer constraints to satisfy, and so it can sometimes pick better sequences. Came up while porting test/CodeGen/AArch64/movw-consts.ll: this will allow a 32-bit MOVN to be used in @test8 soon.

llvm-svn: 206379
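As a quick illustration of that point (a minimal sketch under the commit's reasoning, not part of the test file below; the function name and the exact instruction chosen are illustrative assumptions):

  ; Materializing an i64 constant whose upper 32 bits are zero:
  define i64 @small_const_sketch() {
    ret i64 15
  }
  ; Because a 32-bit write to w0 clears the upper half of x0, a single
  ; 32-bit instruction suffices, e.g. the pattern the CHECK-ARM64 line
  ; in the test below expects:
  ;   orr w0, wzr, #0xf
  ;   ret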
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-none-linux-gnu -mcpu=cyclone | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s

@var32 = global i32 0
@var64 = global i64 0

define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csel:

  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %val1 = select i1 %tst1, i32 42, i32 52
  store i32 %val1, i32* @var32
; CHECK-DAG: movz [[W52:w[0-9]+]], #52
; CHECK-DAG: movz [[W42:w[0-9]+]], #42
; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi

  %rhs64 = sext i32 %rhs32 to i64
  %tst2 = icmp sle i64 %lhs64, %rhs64
  %val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
  store i64 %val2, i64* @var64
; CHECK: sxtw [[EXT_RHS:x[0-9]+]], {{[wx]}}[[RHS:[0-9]+]]
; CHECK: cmp [[LHS:x[0-9]+]], w[[RHS]], sxtw
; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le

  ret void
; CHECK: ret
}

define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %rhs64) {
; CHECK-LABEL: test_floatcsel:

  %tst1 = fcmp one float %lhs32, %rhs32
; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFP-NOT: fcmp
  %val1 = select i1 %tst1, i32 42, i32 52
  store i32 %val1, i32* @var32
; CHECK: movz [[W52:w[0-9]+]], #52
; CHECK: movz [[W42:w[0-9]+]], #42
; CHECK: csel [[MAYBETRUE:w[0-9]+]], [[W42]], [[W52]], mi
; CHECK: csel {{w[0-9]+}}, [[W42]], [[MAYBETRUE]], gt


  %tst2 = fcmp ueq double %lhs64, %rhs64
; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
; CHECK-NOFP-NOT: fcmp
  %val2 = select i1 %tst2, i64 9, i64 15
  store i64 %val2, i64* @var64
; CHECK-AARCH64: movz x[[CONST15:[0-9]+]], #15
; CHECK-ARM64: orr w[[CONST15:[0-9]+]], wzr, #0xf
; CHECK: movz {{[wx]}}[[CONST9:[0-9]+]], #9
; CHECK: csel [[MAYBETRUE:x[0-9]+]], x[[CONST9]], x[[CONST15]], eq
; CHECK: csel {{x[0-9]+}}, x[[CONST9]], [[MAYBETRUE]], vs

  ret void
; CHECK: ret
}


define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csinc:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
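; (csinc Wd, Wn, Wm, cond yields Wn when cond holds and Wm+1 otherwise, so the
; incremented operand has to sit in the Wm slot; selecting rhs+1 when lhs is
; unsigned-greater-than rhs therefore uses the inverted condition, ls. The
; same inversion applies in the csinv and csneg functions below.)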
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = add i32 %rhs32, 1
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinc {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = add i32 %rhs32, 1
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = add i64 %rhs3, 1
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = add i64 %rhs4, 1
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinc {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csinv:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = xor i32 -1, %rhs32
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csinv {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = xor i32 -1, %rhs32
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = xor i64 -1, %rhs3
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = xor i64 -1, %rhs4
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csinv {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) minsize {
; CHECK-LABEL: test_csneg:

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %tst1 = icmp ugt i32 %lhs32, %rhs32
  %inc1 = sub i32 0, %rhs32
  %val1 = select i1 %tst1, i32 %inc1, i32 %lhs32
  store volatile i32 %val1, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
; CHECK: csneg {{w[0-9]+}}, [[LHS]], [[RHS]], ls

  %rhs2 = add i32 %rhs32, 42
  %tst2 = icmp sle i32 %lhs32, %rhs2
  %inc2 = sub i32 0, %rhs32
  %val2 = select i1 %tst2, i32 %lhs32, i32 %inc2
  store volatile i32 %val2, i32* @var32
; CHECK: cmp [[LHS:w[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{w[0-9]+}}, [[LHS]], {{w[0-9]+}}, le

; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
  %rhs3 = sext i32 %rhs32 to i64
  %tst3 = icmp ugt i64 %lhs64, %rhs3
  %inc3 = sub i64 0, %rhs3
  %val3 = select i1 %tst3, i64 %inc3, i64 %lhs64
  store volatile i64 %val3, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, ls

  %rhs4 = zext i32 %rhs32 to i64
  %tst4 = icmp sle i64 %lhs64, %rhs4
  %inc4 = sub i64 0, %rhs4
  %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
  store volatile i64 %val4, i64* @var64
; CHECK: cmp [[LHS:x[0-9]+]], {{w[0-9]+}}
; CHECK: csneg {{x[0-9]+}}, [[LHS]], {{x[0-9]+}}, le

  ret void
; CHECK: ret
}

define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK-LABEL: test_cset:

; N.b. code is not optimal here (32-bit csinc would be better) but
; incoming DAG is too complex
  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = zext i1 %tst1 to i32
  store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, ne

  %rhs64 = sext i32 %rhs to i64
  %tst2 = icmp ule i64 %lhs64, %rhs64
  %val2 = zext i1 %tst2 to i64
  store i64 %val2, i64* @var64
; CHECK: csinc {{w[0-9]+}}, wzr, wzr, hi

  ret void
; CHECK: ret
}

define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
; CHECK-LABEL: test_csetm:

  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = sext i1 %tst1 to i32
  store i32 %val1, i32* @var32
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}
; CHECK: csinv {{w[0-9]+}}, wzr, wzr, ne

  %rhs64 = sext i32 %rhs to i64
  %tst2 = icmp ule i64 %lhs64, %rhs64
  %val2 = sext i1 %tst2 to i64
  store i64 %val2, i64* @var64
; CHECK: csinv {{x[0-9]+}}, xzr, xzr, hi

  ret void
; CHECK: ret
}