1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 20:51:52 +01:00

[AMDGPU] Expand ROTL i16 into shifts.

The instruction combining pass turns a library rotl implementation into llvm.fshl.i16.
In the selection DAG the intrinsic is turned into an ISD::ROTL node that cannot be selected.
We therefore need to expand it back into shifts.

Reviewed By: rampitec, arsenm

Differential Revision: https://reviews.llvm.org/D87618
This commit is contained in:
alex-t 2020-09-16 19:54:29 +03:00
parent f989ef009e
commit 317f9a63d2
3 changed files with 52 additions and 2 deletions

View File

@ -546,8 +546,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);
setOperationAction(ISD::ROTR, MVT::i16, Promote);
setOperationAction(ISD::ROTL, MVT::i16, Promote);
setOperationAction(ISD::ROTR, MVT::i16, Expand);
setOperationAction(ISD::ROTL, MVT::i16, Expand);
setOperationAction(ISD::SDIV, MVT::i16, Promote);
setOperationAction(ISD::UDIV, MVT::i16, Promote);

View File

@ -55,3 +55,28 @@ entry:
store <4 x i32> %3, <4 x i32> addrspace(1)* %in
ret void
}
; GCN-LABEL: @test_rotl_i16
; GCN: global_load_ushort [[X:v[0-9]+]]
; GCN: global_load_ushort [[D:v[0-9]+]]
; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
; GCN: v_lshlrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
; GCN: v_lshrrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
declare i16 @llvm.fshl.i16(i16, i16, i16)
; Regression test for 16-bit rotate-left lowering: fshl with both data
; operands equal to the same value is a rotate-left, which reaches the
; backend as ISD::ROTL and must be expanded into shift/or (checked above).
define void @test_rotl_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
entry:
; %a = sourceA[16]
%arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
%a = load i16, i16 addrspace(1)* %arrayidx
; %b = sourceB[24] (rotate amount)
%arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
%b = load i16, i16 addrspace(1)* %arrayidx2
; fshl(a, a, b) == rotl(a, b); this is what instcombine emits for a
; library-style rotl, so it exercises the ROTL expansion path.
%c = tail call i16 @llvm.fshl.i16(i16 %a, i16 %a, i16 %b)
; destValues[4] = c
%arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
store i16 %c, i16 addrspace(1)* %arrayidx5
ret void
}

View File

@ -51,3 +51,28 @@ entry:
store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %in
ret void
}
; GCN-LABEL: @test_rotr_i16
; GCN: global_load_ushort [[X:v[0-9]+]]
; GCN: global_load_ushort [[D:v[0-9]+]]
; GCN: v_sub_nc_u16_e64 [[NX:v[0-9]+]], 0, [[X]]
; GCN: v_and_b32_e32 [[XAND:v[0-9]+]], 15, [[X]]
; GCN: v_and_b32_e32 [[NXAND:v[0-9]+]], 15, [[NX]]
; GCN: v_lshrrev_b16_e64 [[LO:v[0-9]+]], [[XAND]], [[D]]
; GCN: v_lshlrev_b16_e64 [[HI:v[0-9]+]], [[NXAND]], [[D]]
; GCN: v_or_b32_e32 [[RES:v[0-9]+]], [[LO]], [[HI]]
; GCN: global_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RES]]
declare i16 @llvm.fshr.i16(i16, i16, i16)
; Regression test for 16-bit rotate-right lowering: fshr with both data
; operands equal to the same value is a rotate-right, which reaches the
; backend as ISD::ROTR and must be expanded into shift/or (checked above).
define void @test_rotr_i16(i16 addrspace(1)* nocapture readonly %sourceA, i16 addrspace(1)* nocapture readonly %sourceB, i16 addrspace(1)* nocapture %destValues) {
entry:
; %a = sourceA[16]
%arrayidx = getelementptr inbounds i16, i16 addrspace(1)* %sourceA, i64 16
%a = load i16, i16 addrspace(1)* %arrayidx
; %b = sourceB[24] (rotate amount)
%arrayidx2 = getelementptr inbounds i16, i16 addrspace(1)* %sourceB, i64 24
%b = load i16, i16 addrspace(1)* %arrayidx2
; fshr(a, a, b) == rotr(a, b); this is what instcombine emits for a
; library-style rotr, so it exercises the ROTR expansion path.
%c = tail call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 %b)
; destValues[4] = c
%arrayidx5 = getelementptr inbounds i16, i16 addrspace(1)* %destValues, i64 4
store i16 %c, i16 addrspace(1)* %arrayidx5
ret void
}