llvm-mirror/test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll
Sebastian Neubauer 8980610845 [InstCombine] Move target-specific inst combining
For a long time, the InstCombine pass handled target-specific
intrinsics. Having target-specific code in general passes had long been
noted as an area for improvement.

D81728 moves most target-specific code out of the InstCombine pass.
Applying the target-specific combines in an extra pass would probably
result in inferior optimizations compared to the current fixed-point
iteration, so instead the InstCombine pass now calls newly introduced
hooks in TargetTransformInfo when it encounters unknown intrinsics.
The patch should not have any effect on generated code (under the
assumption that code never uses intrinsics from a foreign target).
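
As a rough sketch of that delegation (assuming the 2020-era API with
llvm::Optional and the InstCombiner header introduced by this patch;
this is an illustration, not the verbatim code in InstCombineCalls.cpp):

// Minimal sketch of the new delegation point inside InstCombine.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;

static Instruction *tryTargetCombine(InstCombiner &IC,
                                     const TargetTransformInfo &TTI,
                                     IntrinsicInst &II) {
  // Give the target the first chance to fold one of its own intrinsics.
  if (Optional<Instruction *> V = TTI.instCombineIntrinsic(IC, II))
    return *V; // the target handled (or erased) the call
  // None: no target-specific combine applies; the generic,
  // target-independent combines run as before (elided here).
  return nullptr;
}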

This introduces three new functions (a target-side sketch of the first
follows the list):
TargetTransformInfo::instCombineIntrinsic
TargetTransformInfo::simplifyDemandedUseBitsIntrinsic
TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic
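
On the target side, a backend overrides the hook in its TTI
implementation. In the sketch below, MyTargetTTIImpl and the intrinsic
name are hypothetical stand-ins; only the hook name and the
Optional-based return convention come from the patch:

// Hedged sketch of a target implementing the new hook.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;

Optional<Instruction *>
MyTargetTTIImpl::instCombineIntrinsic(InstCombiner &IC,
                                      IntrinsicInst &II) const {
  switch (II.getIntrinsicID()) {
  case Intrinsic::mytarget_identity: // hypothetical no-op intrinsic
    // Fold the call to its operand; InstCombine's worklist picks up the
    // users, preserving the fixed-point iteration behaviour.
    return IC.replaceInstUsesWith(II, II.getArgOperand(0));
  default:
    // None tells InstCombine "no target combine applies"; generic
    // handling then continues as usual.
    return None;
  }
}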

A few target-specific parts are left in the InstCombine folder where it
makes sense to share code. The largest leftover part in
InstCombineCalls.cpp is the code shared between ARM and AArch64.

This allows moving about 3000 lines out of InstCombine and into the targets.

Differential Revision: https://reviews.llvm.org/D81728
2020-07-22 15:59:49 +02:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: opt -instcombine -mtriple=thumbv8.1m.main-none-eabi %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
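
; The pred.v2i -> trunc -> zext -> pred.i2v round trip below is a no-op
; on the predicate, so InstCombine folds it away and llc can form a
; single VPT block.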
define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
; CHECK-LABEL: test_vpt_block:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i16 eq, q1, q2
; CHECK-NEXT: vaddt.i16 q0, q3, q2
; CHECK-NEXT: bx lr
entry:
%0 = icmp eq <8 x i16> %v1, %v2
%1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
%2 = trunc i32 %1 to i16
%3 = zext i16 %2 to i32
%4 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %3)
%5 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %v3, <8 x i16> %v2, <8 x i1> %4, <8 x i16> %v_inactive)
ret <8 x i16> %5
}
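
; Same structure, but the lane mask is inverted (xor -1) between the
; conversions; codegen must express that inversion as a VPNOT.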
define arm_aapcs_vfpcc <8 x i16> @test_vpnot(<8 x i16> %v, <8 x i16> %w, <8 x i16> %x, i32 %n) {
; CHECK-LABEL: test_vpnot:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vctp.16 r0
; CHECK-NEXT: vpnot
; CHECK-NEXT: vpst
; CHECK-NEXT: vaddt.i16 q0, q1, q2
; CHECK-NEXT: bx lr
entry:
%0 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %n)
%1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
%2 = trunc i32 %1 to i16
%3 = xor i16 %2, -1
%4 = zext i16 %3 to i32
%5 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %4)
%6 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %w, <8 x i16> %x, <8 x i1> %5, <8 x i16> %v)
ret <8 x i16> %6
}
declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
declare <8 x i1> @llvm.arm.mve.vctp16(i32)