; Provenance (scrape metadata, comment-ized so the file parses):
; mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2024-11-26,
; commit e554dc2124: "Expand 128 bit shifts instead of using a libcall."
; This patch removes the 128-bit shift libcalls and thereby causes
; ExpandShiftWithUnknownAmountBit() to be called.
; Review: Ulrich Weigand  Differential Revision: https://reviews.llvm.org/D101993
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
;
; Test removal of AND operations that don't affect last 6 bits of shift amount
; operand.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s

; Test that AND is not removed when some lower 6 bits are not set.
; Mask 31 does not cover bit 5, so the AND (nill) must be kept.
define i32 @f1(i32 %a, i32 %sh) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nill %r3, 31
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 31
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of AND mask with only bottom 6 bits set.
; SLL only reads the low 6 bits of the amount, so the AND is redundant.
define i32 @f2(i32 %a, i32 %sh) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of AND mask including but not limited to bottom 6 bits.
; Mask 255 covers all 6 low bits, so the AND can still be dropped.
define i32 @f3(i32 %a, i32 %sh) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 255
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test removal of AND mask from SRA.
define i32 @f4(i32 %a, i32 %sh) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sra %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = ashr i32 %a, %and
  ret i32 %shift
}

; Test removal of AND mask from SRL.
define i32 @f5(i32 %a, i32 %sh) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srl %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = lshr i32 %a, %and
  ret i32 %shift
}

; Test removal of AND mask from SLLG (64-bit shift left).
define i64 @f6(i64 %a, i64 %sh) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sllg %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = shl i64 %a, %and
  ret i64 %shift
}

; Test removal of AND mask from SRAG (64-bit arithmetic shift right).
define i64 @f7(i64 %a, i64 %sh) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srag %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = ashr i64 %a, %and
  ret i64 %shift
}

; Test removal of AND mask from SRLG (64-bit logical shift right).
define i64 @f8(i64 %a, i64 %sh) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srlg %r2, %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i64 %sh, 63
  %shift = lshr i64 %a, %and
  ret i64 %shift
}

; Test that AND with two register operands is not affected:
; with a non-constant mask the optimization does not apply, so NR is kept.
define i32 @f9(i32 %a, i32 %b, i32 %sh) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    nr %r3, %r4
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, %b
  %shift = shl i32 %a, %and
  ret i32 %shift
}

; Test that AND is not entirely removed if the result is reused:
; the shift can still drop the mask, but the add needs the masked value (nilf).
define i32 @f10(i32 %a, i32 %sh) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sll %r2, 0(%r3)
; CHECK-NEXT:    nilf %r3, 63
; CHECK-NEXT:    ar %r2, %r3
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 63
  %shift = shl i32 %a, %and
  %reuse = add i32 %and, %shift
  ret i32 %reuse
}

; Test i128 SHL with an AND-masked (127) shift amount: expanded inline via
; ExpandShiftWithUnknownAmountBit() rather than calling a shift libcall.
define i128 @f11(i128 %a, i32 %sh) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    lg %r0, 8(%r3)
; CHECK-NEXT:    lg %r1, 0(%r3)
; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
; CHECK-NEXT:    lcr %r14, %r3
; CHECK-NEXT:    sllg %r5, %r1, 0(%r4)
; CHECK-NEXT:    srlg %r14, %r0, 0(%r14)
; CHECK-NEXT:    ogr %r5, %r14
; CHECK-NEXT:    sllg %r3, %r0, -64(%r3)
; CHECK-NEXT:    tmll %r4, 127
; CHECK-NEXT:    locgrle %r3, %r5
; CHECK-NEXT:    sllg %r0, %r0, 0(%r4)
; CHECK-NEXT:    locgre %r3, %r1
; CHECK-NEXT:    locghinle %r0, 0
; CHECK-NEXT:    stg %r0, 8(%r2)
; CHECK-NEXT:    stg %r3, 0(%r2)
; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 127
  %ext = zext i32 %and to i128
  %shift = shl i128 %a, %ext
  ret i128 %shift
}

; Test i128 LSHR with an AND-masked (127) shift amount: expanded inline
; instead of using a libcall.
define i128 @f12(i128 %a, i32 %sh) {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    lg %r0, 0(%r3)
; CHECK-NEXT:    lg %r1, 8(%r3)
; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
; CHECK-NEXT:    lcr %r14, %r3
; CHECK-NEXT:    srlg %r5, %r1, 0(%r4)
; CHECK-NEXT:    sllg %r14, %r0, 0(%r14)
; CHECK-NEXT:    ogr %r5, %r14
; CHECK-NEXT:    srlg %r3, %r0, -64(%r3)
; CHECK-NEXT:    tmll %r4, 127
; CHECK-NEXT:    locgrle %r3, %r5
; CHECK-NEXT:    srlg %r0, %r0, 0(%r4)
; CHECK-NEXT:    locgre %r3, %r1
; CHECK-NEXT:    locghinle %r0, 0
; CHECK-NEXT:    stg %r0, 0(%r2)
; CHECK-NEXT:    stg %r3, 8(%r2)
; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 127
  %ext = zext i32 %and to i128
  %shift = lshr i128 %a, %ext
  ret i128 %shift
}

; Test i128 ASHR with an AND-masked (127) shift amount: expanded inline;
; the sign is replicated with a 63-bit SRAG for amounts >= 64.
define i128 @f13(i128 %a, i32 %sh) {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    lg %r0, 0(%r3)
; CHECK-NEXT:    lg %r1, 8(%r3)
; CHECK-NEXT:    risblg %r3, %r4, 25, 159, 0
; CHECK-NEXT:    lcr %r14, %r3
; CHECK-NEXT:    srlg %r5, %r1, 0(%r4)
; CHECK-NEXT:    sllg %r14, %r0, 0(%r14)
; CHECK-NEXT:    ogr %r5, %r14
; CHECK-NEXT:    srag %r14, %r0, 0(%r4)
; CHECK-NEXT:    srag %r3, %r0, -64(%r3)
; CHECK-NEXT:    srag %r0, %r0, 63
; CHECK-NEXT:    tmll %r4, 127
; CHECK-NEXT:    locgrle %r3, %r5
; CHECK-NEXT:    locgre %r3, %r1
; CHECK-NEXT:    locgrle %r0, %r14
; CHECK-NEXT:    stg %r0, 0(%r2)
; CHECK-NEXT:    stg %r3, 8(%r2)
; CHECK-NEXT:    lmg %r14, %r15, 112(%r15)
; CHECK-NEXT:    br %r14
  %and = and i32 %sh, 127
  %ext = zext i32 %and to i128
  %shift = ashr i128 %a, %ext
  ret i128 %shift
}