[ARM,MVE] Add InstCombine rules for pred_i2v / pred_v2i.
If you're writing C code using the ACLE MVE intrinsics that passes the result of a vcmp as input to a predicated intrinsic, e.g.

  mve_pred16_t pred = vcmpeqq(v1, v2);
  v_out = vaddq_m(v_inactive, v3, v4, pred);

then clang's codegen for the compare intrinsic will create calls to `@llvm.arm.mve.pred.v2i` to convert the output of `icmp` into an `mve_pred16_t` integer representation, and then the next intrinsic will call `@llvm.arm.mve.pred.i2v` to convert it straight back again. This will be visible in the generated code as a `vmrs`/`vmsr` pair that moves the predicate value pointlessly out of `p0` and back into it again.

To prevent that, I've added InstCombine rules to remove round trips of the form `v2i(i2v(x))` and `i2v(v2i(x))`. I've also taught InstCombine about the known and demanded bits of those intrinsics. As a result, you now get just the generated code you wanted:

  vpt.u16 eq, q1, q2
  vaddt.u16 q0, q3, q4

Reviewers: ostannard, MarkMurrayARM, dmgreen

Reviewed By: dmgreen

Subscribers: kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70313
commit 2090af3d5e
parent 82ebd4fde8
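To make the transformation concrete, here is a minimal IR-level sketch of the round trip described in the commit message (the function and value names are illustrative and modelled on the tests added below; this snippet is not part of the patch). Before `-instcombine`, the `<8 x i1>` compare result is converted to i32 and straight back; afterwards, the `icmp` result feeds the predicated intrinsic directly.

  declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
  declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
  declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)

  define <8 x i16> @round_trip(<8 x i16> %v_inactive, <8 x i16> %v3, <8 x i16> %v4, <8 x i16> %v1, <8 x i16> %v2) {
    %cmp = icmp eq <8 x i16> %v1, %v2
    ; clang emits this v2i/i2v pair when the predicate passes through mve_pred16_t:
    %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %cmp)
    %pred = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int)
    %out = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %v3, <8 x i16> %v4, <8 x i1> %pred, <8 x i16> %v_inactive)
    ret <8 x i16> %out
  }

  ; After -instcombine, the i2v(v2i(%cmp)) pair folds away and the call becomes:
  ;   %out = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(
  ;              <8 x i16> %v3, <8 x i16> %v4, <8 x i1> %cmp, <8 x i16> %v_inactive)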
@@ -3308,6 +3308,34 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     }
     break;
   }
+  case Intrinsic::arm_mve_pred_i2v: {
+    Value *Arg = II->getArgOperand(0);
+    Value *ArgArg;
+    if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(m_Value(ArgArg))) &&
+        II->getType() == ArgArg->getType())
+      return replaceInstUsesWith(*II, ArgArg);
+    KnownBits ScalarKnown(32);
+    if (SimplifyDemandedBits(II, 0, APInt::getLowBitsSet(32, 16),
+                             ScalarKnown, 0))
+      return II;
+    break;
+  }
+  case Intrinsic::arm_mve_pred_v2i: {
+    Value *Arg = II->getArgOperand(0);
+    Value *ArgArg;
+    if (match(Arg, m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(m_Value(ArgArg))))
+      return replaceInstUsesWith(*II, ArgArg);
+    if (!II->getMetadata(LLVMContext::MD_range)) {
+      Type *IntTy32 = Type::getInt32Ty(II->getContext());
+      Metadata *M[] = {
+        ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
+        ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))
+      };
+      II->setMetadata(LLVMContext::MD_range, MDNode::get(II->getContext(), M));
+      return II;
+    }
+    break;
+  }
   case Intrinsic::arm_mve_vadc:
   case Intrinsic::arm_mve_vadc_predicated: {
     unsigned CarryOp =
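The explicit pattern match above handles the exact back-to-back case. The `SimplifyDemandedBits` call (only the low 16 bits of the i2v operand are demanded) and the `!range` metadata attached to v2i results are what make the indirect cases fold too, e.g. when clang's handling of `mve_pred16_t` inserts a trunc-to-i16/zext-to-i32 pair between the two intrinsics. A rough sketch of the effect, with illustrative names, based on the InstCombine tests added below:

  declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)

  define <4 x i1> @narrow_then_widen(i32 %wide) {
    ; Only the low 16 bits of the i2v operand are demanded, so this
    ; trunc/zext pair via i16 is removed and %wide is passed straight in.
    %narrow = trunc i32 %wide to i16
    %rewide = zext i16 %narrow to i32
    %vec = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %rewide)
    ret <4 x i1> %vec
  }

  ; Symmetrically, calls to @llvm.arm.mve.pred.v2i.* gain
  ; !range !{i32 0, i32 65535} metadata, so a later trunc/zext via i16
  ; (or an 'and' with 65535) applied to their result is dropped as well.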
test/CodeGen/Thumb2/mve-vpt-from-intrinsics.ll (new file, +22 lines)
@@ -0,0 +1,22 @@
; RUN: opt -instcombine %s | llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve --verify-machineinstrs -o - | FileCheck %s

define arm_aapcs_vfpcc <8 x i16> @test_vpt_block(<8 x i16> %v_inactive, <8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
; CHECK-LABEL: test_vpt_block:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vpt.i16 eq, q1, q2
; CHECK-NEXT:    vaddt.i16 q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %0 = icmp eq <8 x i16> %v1, %v2
  %1 = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %0)
  %2 = trunc i32 %1 to i16
  %3 = zext i16 %2 to i32
  %4 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %3)
  %5 = call <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16> %v3, <8 x i16> %v2, <8 x i1> %4, <8 x i16> %v_inactive)
  ret <8 x i16> %5
}

declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>)
test/Transforms/InstCombine/ARM/mve-v2i2v.ll (new file, +236 lines)
@@ -0,0 +1,236 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S -o - %s | FileCheck %s

declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)

declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32)
declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32)

; Round-trip conversions from predicate vector to i32 back to the same
; size of vector should be eliminated.

define <4 x i1> @v2i2v_4(<4 x i1> %vin) {
; CHECK-LABEL: @v2i2v_4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret <4 x i1> [[VIN:%.*]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int)
  ret <4 x i1> %vout
}

define <8 x i1> @v2i2v_8(<8 x i1> %vin) {
; CHECK-LABEL: @v2i2v_8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret <8 x i1> [[VIN:%.*]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int)
  ret <8 x i1> %vout
}

define <16 x i1> @v2i2v_16(<16 x i1> %vin) {
; CHECK-LABEL: @v2i2v_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret <16 x i1> [[VIN:%.*]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int)
  ret <16 x i1> %vout
}

; Conversions from a predicate vector to i32 and then to a _different_
; size of predicate vector should be left alone.

define <16 x i1> @v2i2v_4_16(<4 x i1> %vin) {
; CHECK-LABEL: @v2i2v_4_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    [[VOUT:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[INT]])
; CHECK-NEXT:    ret <16 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %int)
  ret <16 x i1> %vout
}

define <4 x i1> @v2i2v_8_4(<8 x i1> %vin) {
; CHECK-LABEL: @v2i2v_8_4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[INT]])
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %int)
  ret <4 x i1> %vout
}

define <8 x i1> @v2i2v_16_8(<16 x i1> %vin) {
; CHECK-LABEL: @v2i2v_16_8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[INT:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    [[VOUT:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[INT]])
; CHECK-NEXT:    ret <8 x i1> [[VOUT]]
;
entry:
  %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
  %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %int)
  ret <8 x i1> %vout
}

; Round-trip conversions from i32 to predicate vector back to i32
; should be eliminated.

define i32 @i2v2i_4(i32 %iin) {
; CHECK-LABEL: @i2v2i_4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i32 [[IIN:%.*]]
;
entry:
  %vec = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %iin)
  %iout = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vec)
  ret i32 %iout
}

define i32 @i2v2i_8(i32 %iin) {
; CHECK-LABEL: @i2v2i_8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i32 [[IIN:%.*]]
;
entry:
  %vec = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %iin)
  %iout = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vec)
  ret i32 %iout
}

define i32 @i2v2i_16(i32 %iin) {
; CHECK-LABEL: @i2v2i_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    ret i32 [[IIN:%.*]]
;
entry:
  %vec = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %iin)
  %iout = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vec)
  ret i32 %iout
}

; v2i leaves the top 16 bits clear. So a trunc/zext pair applied to
; its output, going via i16, can be completely eliminated - but not
; one going via i8. Similarly with other methods of clearing the top
; bits, like bitwise and.

define i32 @v2i_truncext_i16(<4 x i1> %vin) {
; CHECK-LABEL: @v2i_truncext_i16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    ret i32 [[WIDE1]]
;
entry:
  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %narrow = trunc i32 %wide1 to i16
  %wide2 = zext i16 %narrow to i32
  ret i32 %wide2
}

define i32 @v2i_truncext_i8(<4 x i1> %vin) {
; CHECK-LABEL: @v2i_truncext_i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1]], 255
; CHECK-NEXT:    ret i32 [[WIDE2]]
;
entry:
  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %narrow = trunc i32 %wide1 to i8
  %wide2 = zext i8 %narrow to i32
  ret i32 %wide2
}

define i32 @v2i_and_16(<4 x i1> %vin) {
; CHECK-LABEL: @v2i_and_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    ret i32 [[WIDE1]]
;
entry:
  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %wide2 = and i32 %wide1, 65535
  ret i32 %wide2
}

define i32 @v2i_and_15(<4 x i1> %vin) {
; CHECK-LABEL: @v2i_and_15(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[VIN:%.*]]), !range !0
; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1]], 32767
; CHECK-NEXT:    ret i32 [[WIDE2]]
;
entry:
  %wide1 = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
  %wide2 = and i32 %wide1, 32767
  ret i32 %wide2
}

; i2v doesn't use the top bits of its input. So the same operations
; on a value that's about to be passed to i2v can be eliminated.

define <4 x i1> @i2v_truncext_i16(i32 %wide1) {
; CHECK-LABEL: @i2v_truncext_i16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]])
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %narrow = trunc i32 %wide1 to i16
  %wide2 = zext i16 %narrow to i32
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
  ret <4 x i1> %vout
}

define <4 x i1> @i2v_truncext_i8(i32 %wide1) {
; CHECK-LABEL: @i2v_truncext_i8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 255
; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]])
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %narrow = trunc i32 %wide1 to i8
  %wide2 = zext i8 %narrow to i32
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
  ret <4 x i1> %vout
}

define <4 x i1> @i2v_and_16(i32 %wide1) {
; CHECK-LABEL: @i2v_and_16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE1:%.*]])
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %wide2 = and i32 %wide1, 65535
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
  ret <4 x i1> %vout
}

define <4 x i1> @i2v_and_15(i32 %wide1) {
; CHECK-LABEL: @i2v_and_15(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[WIDE2:%.*]] = and i32 [[WIDE1:%.*]], 32767
; CHECK-NEXT:    [[VOUT:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[WIDE2]])
; CHECK-NEXT:    ret <4 x i1> [[VOUT]]
;
entry:
  %wide2 = and i32 %wide1, 32767
  %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
  ret <4 x i1> %vout
}