1
0
Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-22 18:54:02 +01:00.

[RISCV] Allow conversion of CC logic to bitwise logic

Indicates in the TargetLowering interface that conversions from CC logic to
bitwise logic are allowed. Adds tests that show the benefit when optimization
opportunities are detected. Also adds tests that show that when the optimization
is not applied correct code is generated (but opportunities for other
optimizations remain).

Differential Revision: https://reviews.llvm.org/D59596
Patch by Luís Marques.

llvm-svn: 356740
This commit is contained in:
Alex Bradbury 2019-03-22 10:39:22 +00:00
parent 1e9101d9d7
commit e6a58c70c2
2 changed files with 134 additions and 0 deletions

View File

@@ -98,6 +98,10 @@ public:
// Hunk from the RISCVTargetLowering class body (class header not in view).
// Chooses the value type produced by a setcc node for this target.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
EVT VT) const override;
// New in this commit: tell DAGCombiner it may fold logic on setcc results
// into bitwise logic on the compared scalar-integer values, e.g.
// (and (seteq a,b) (seteq c,d)) -> (seteq (or (xor a,b) (xor c,d)), 0).
// The added test file below demonstrates the resulting xor/or/seqz codegen.
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
return VT.isScalarInteger();
}
// Returning true for atomic loads/stores requests explicit fence insertion
// around them (pre-existing code, unchanged by this commit).
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return isa<LoadInst>(I) || isa<StoreInst>(I);
}

View File

@@ -0,0 +1,130 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; (a == b) & (c == d): with the new hook, both compares fold into bitwise
; logic — xor/xor/or feeding a single seqz — instead of two seqz plus an and.
; RV64 additionally zero-extends the 32-bit result (slli 32 / srli 32)
; before the seqz, since i32 values live in 64-bit registers.
define i1 @and_icmp_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: and_icmp_eq:
; RV32I: # %bb.0:
; RV32I-NEXT: xor a2, a2, a3
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: and_icmp_eq:
; RV64I: # %bb.0:
; RV64I-NEXT: xor a2, a2, a3
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ret
%cmp1 = icmp eq i32 %a, %b
%cmp2 = icmp eq i32 %c, %d
%and = and i1 %cmp1, %cmp2
ret i1 %and
}
; (a != b) | (c != d): dual of and_icmp_eq — the same xor/xor/or sequence,
; finished with snez rather than seqz. RV64 again zero-extends the i32
; result (slli 32 / srli 32) before the final compare-against-zero.
define i1 @or_icmp_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: or_icmp_ne:
; RV32I: # %bb.0:
; RV32I-NEXT: xor a2, a2, a3
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: or_icmp_ne:
; RV64I: # %bb.0:
; RV64I-NEXT: xor a2, a2, a3
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ret
%cmp1 = icmp ne i32 %a, %b
%cmp2 = icmp ne i32 %c, %d
%or = or i1 %cmp1, %cmp2
ret i1 %or
}
; x == 17 || x == 13: after subtracting 13, the two accepted values become
; 0 and 4 — a single-bit difference — so the pair folds to
; ((x - 13) & ~4) == 0 (addi -13 / andi -5 / seqz on RV64). On RV32 the
; same check is done on the split i64: sltu carries the borrow into the
; high word and the two halves are or'ed before the seqz.
define i1 @or_icmps_const_1bit_diff(i64 %x) nounwind {
; RV32I-LABEL: or_icmps_const_1bit_diff:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a2, a0, -13
; RV32I-NEXT: sltu a0, a2, a0
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: andi a1, a2, -5
; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: or_icmps_const_1bit_diff:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, -13
; RV64I-NEXT: andi a0, a0, -5
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ret
%a = icmp eq i64 %x, 17
%b = icmp eq i64 %x, 13
%r = or i1 %a, %b
ret i1 %r
}
; x != 44 && x != 60: the rejected values differ by 16 (one bit after
; rebasing), so this folds to ((x - 44) & ~16) != 0 — addi -44 / andi -17 /
; snez on RV32. On RV64 the i32 mask ~16 must be materialized as the
; 64-bit constant 0xFFFFFFEF (slli/addi sequence) before the and.
define i1 @and_icmps_const_1bit_diff(i32 %x) nounwind {
; RV32I-LABEL: and_icmps_const_1bit_diff:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a0, a0, -44
; RV32I-NEXT: andi a0, a0, -17
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: and_icmps_const_1bit_diff:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, -44
; RV64I-NEXT: addi a1, zero, 1
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: addi a1, a1, -17
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ret
%a = icmp ne i32 %x, 44
%b = icmp ne i32 %x, 60
%r = and i1 %a, %b
ret i1 %r
}
; Negative test: 44 and 92 do not differ by a single bit, so the
; single-mask fold does not apply and each compare is materialized
; separately (xor/snez per constant, combined with and). Still correct —
; it just leaves room for other optimizations, per the commit message.
define i1 @and_icmps_const_not1bit_diff(i32 %x) nounwind {
; RV32I-LABEL: and_icmps_const_not1bit_diff:
; RV32I: # %bb.0:
; RV32I-NEXT: addi a1, zero, 44
; RV32I-NEXT: xor a1, a0, a1
; RV32I-NEXT: addi a2, zero, 92
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: snez a1, a1
; RV32I-NEXT: and a0, a1, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: and_icmps_const_not1bit_diff:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a1, zero, 44
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: addi a2, zero, 92
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: snez a1, a1
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: ret
%a = icmp ne i32 %x, 44
%b = icmp ne i32 %x, 92
%r = and i1 %a, %b
ret i1 %r
}