
[RISCV] Add test case showing suboptimal codegen when loading unsigned char/short

Implementing isZExtFree will allow lbu or lhu to be selected rather than
lb+mask and lh+mask.

llvm-svn: 330942
Alex Bradbury 2018-04-26 14:00:35 +00:00
parent 0ecc1283b2
commit 67f9cb4d52
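
For context, the hook the commit message refers to is TargetLowering::isZExtFree. Below is a minimal sketch of the kind of override that would let the DAG combiner fold the masking into the load; the exact conditions and placement in RISCVISelLowering are assumptions here, not part of this commit.

// RISCVISelLowering.h: declare the override (assumed placement).
bool isZExtFree(SDValue Val, EVT VT2) const override;

// RISCVISelLowering.cpp: treat zero extension of a narrow load as free so
// that and(load, 255/65535) can be combined into a zextload, which then
// selects to lbu/lhu instead of lb/lh plus a mask.
bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }
  return TargetLowering::isZExtFree(Val, VT2);
}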


@@ -0,0 +1,79 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; TODO: lbu and lhu should be selected to avoid the unnecessary masking.

@bytes = global [5 x i8] zeroinitializer, align 1

define i32 @test_zext_i8() {
; RV32I-LABEL: test_zext_i8:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: lui a0, %hi(bytes)
; RV32I-NEXT: lbu a0, %lo(bytes)(a0)
; RV32I-NEXT: addi a1, zero, 136
; RV32I-NEXT: bne a0, a1, .LBB0_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: lui a0, %hi(bytes+1)
; RV32I-NEXT: lb a0, %lo(bytes+1)(a0)
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: addi a1, zero, 7
; RV32I-NEXT: bne a0, a1, .LBB0_3
; RV32I-NEXT: # %bb.2: # %if.end
; RV32I-NEXT: mv a0, zero
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_3: # %if.then
; RV32I-NEXT: addi a0, zero, 1
; RV32I-NEXT: ret
entry:
  %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
  %cmp = icmp eq i8 %0, -120
  %1 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 1), align 1
  %cmp3 = icmp eq i8 %1, 7
  %or.cond = and i1 %cmp, %cmp3
  br i1 %or.cond, label %if.end, label %if.then

if.then:
  ret i32 1

if.end:
  ret i32 0
}

@shorts = global [5 x i16] zeroinitializer, align 2

define i32 @test_zext_i16() {
; RV32I-LABEL: test_zext_i16:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi a1, a0, -120
; RV32I-NEXT: lui a2, %hi(shorts)
; RV32I-NEXT: lhu a2, %lo(shorts)(a2)
; RV32I-NEXT: bne a2, a1, .LBB1_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: lui a1, %hi(shorts+2)
; RV32I-NEXT: lh a1, %lo(shorts+2)(a1)
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a1, a0
; RV32I-NEXT: addi a1, zero, 7
; RV32I-NEXT: bne a0, a1, .LBB1_3
; RV32I-NEXT: # %bb.2: # %if.end
; RV32I-NEXT: mv a0, zero
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB1_3: # %if.then
; RV32I-NEXT: addi a0, zero, 1
; RV32I-NEXT: ret
entry:
  %0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2
  %cmp = icmp eq i16 %0, -120
  %1 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 1), align 2
  %cmp3 = icmp eq i16 %1, 7
  %or.cond = and i1 %cmp, %cmp3
  br i1 %or.cond, label %if.end, label %if.then

if.then:
  ret i32 1

if.end:
  ret i32 0
}
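
With a hook along those lines in place, the masked sequences above would be expected to collapse into plain zero-extending loads. For example, the second load in test_zext_i8 would presumably check as (expected output under that assumption, not verified here):

; RV32I-NEXT: lui a0, %hi(bytes+1)
; RV32I-NEXT: lbu a0, %lo(bytes+1)(a0)
; RV32I-NEXT: addi a1, zero, 7

That is, the andi a0, a0, 255 disappears, and lhu would similarly replace the lh plus lui/addi/and masking in test_zext_i16.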