
Implement strip.invariant.group

Summary:
This patch introduces a new intrinsic,
strip.invariant.group, which was described in the
RFC: Devirtualization v2

Reviewers: rsmith, hfinkel, nlopes, sanjoy, amharc, kuhar

Subscribers: arsenm, nhaehnle, JDevlieghere, hiraditya, xbolva00, llvm-commits

Differential Revision: https://reviews.llvm.org/D47103

Co-authored-by: Krzysztof Pszeniczny <krzysztof.pszeniczny@gmail.com>
llvm-svn: 336073
Piotr Padlewski 2018-07-02 04:49:30 +00:00
parent 592986ec19
commit f85a84745e
21 changed files with 297 additions and 49 deletions


@ -13350,16 +13350,17 @@ Overview:
"""""""""
The '``llvm.launder.invariant.group``' intrinsic can be used when an invariant
established by invariant.group metadata no longer holds, to obtain a new pointer
value that does not carry the invariant information. It is an experimental
intrinsic, which means that its semantics might change in the future.
established by ``invariant.group`` metadata no longer holds, to obtain a new
pointer value that carries fresh invariant group information. It is an
experimental intrinsic, which means that its semantics might change in the
future.
Arguments:
""""""""""
The ``llvm.launder.invariant.group`` takes only one argument, which is
the pointer to the memory for which the ``invariant.group`` no longer holds.
The ``llvm.launder.invariant.group`` takes only one argument, which is a pointer
to the memory.
Semantics:
""""""""""
@ -13368,6 +13369,43 @@ Returns another pointer that aliases its argument but which is considered differ
for the purposes of ``load``/``store`` ``invariant.group`` metadata.
It does not read any accessible memory and the execution can be speculated.
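For example (a minimal sketch; names and values are illustrative), laundering
lets a new value be stored under ``invariant.group`` through storage whose
previous contents were also accessed under ``invariant.group``:

::

      define void @example(i8* %obj) {
        %v1 = load i8, i8* %obj, !invariant.group !0
        ; The invariant established through %obj no longer holds, so launder
        ; the pointer before storing a different value under invariant.group.
        %laundered = call i8* @llvm.launder.invariant.group.p0i8(i8* %obj)
        store i8 42, i8* %laundered, !invariant.group !0
        ; May be folded to 42, but cannot be assumed equal to %v1.
        %v2 = load i8, i8* %laundered, !invariant.group !0
        ret void
      }

      declare i8* @llvm.launder.invariant.group.p0i8(i8*)
      !0 = !{}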
'``llvm.strip.invariant.group``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
This is an overloaded intrinsic. The memory object can belong to any address
space. The returned pointer must belong to the same address space as the
argument.
::
declare i8* @llvm.strip.invariant.group.p0i8(i8* <ptr>)
Overview:
"""""""""
The '``llvm.strip.invariant.group``' intrinsic can be used when an invariant
established by ``invariant.group`` metadata no longer holds, to obtain a new pointer
value that does not carry the invariant information. It is an experimental
intrinsic, which means that its semantics might change in the future.
Arguments:
""""""""""
The ``llvm.strip.invariant.group`` takes only one argument, which is a pointer
to the memory.
Semantics:
""""""""""
Returns another pointer that aliases its argument but which has no associated
``invariant.group`` metadata.
It does not read any memory and can be speculated.
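For example (a minimal sketch; names are illustrative), pointers can be
stripped before being compared, so that the comparison carries no
``invariant.group`` information about the original pointers; because the
intrinsic is ``readnone`` and ``speculatable``, such calls can be freely
hoisted, sunk, or CSE'd:

::

      define i1 @example(i8* %p, i8* %q) {
        ; %ps and %qs alias %p and %q but have no associated
        ; invariant.group information.
        %ps = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
        %qs = call i8* @llvm.strip.invariant.group.p0i8(i8* %q)
        %same = icmp eq i8* %ps, %qs
        ret i1 %same
      }

      declare i8* @llvm.strip.invariant.group.p0i8(i8*)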
.. _constrainedfp:
Constrained Floating-Point Intrinsics


@ -2022,6 +2022,7 @@ public:
Value *CreateLaunderInvariantGroup(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
"launder.invariant.group only applies to pointers.");
// FIXME: we could potentially avoid casts to/from i8*.
auto *PtrType = Ptr->getType();
auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
if (PtrType != Int8PtrTy)
@ -2042,6 +2043,34 @@ public:
return Fn;
}
/// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
/// not i8*, it is cast to i8* in the same address space before the call and
/// the result is cast back to Ptr's type after the call.
Value *CreateStripInvariantGroup(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
"strip.invariant.group only applies to pointers.");
// FIXME: we could potentially avoid casts to/from i8*.
auto *PtrType = Ptr->getType();
auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
if (PtrType != Int8PtrTy)
Ptr = CreateBitCast(Ptr, Int8PtrTy);
Module *M = BB->getParent()->getParent();
Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
M, Intrinsic::strip_invariant_group, {Int8PtrTy});
assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
Int8PtrTy &&
"StripInvariantGroup should take and return the same type");
CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
if (PtrType != Int8PtrTy)
return CreateBitCast(Fn, PtrType);
return Fn;
}
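For a pointer whose type is not i8*, the helper wraps the intrinsic call in
bitcasts, so the emitted IR would look roughly like this (a sketch; value
names are illustrative):

%0 = bitcast i32* %ptr to i8*
%1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
%2 = bitcast i8* %1 to i32*

For an i8* argument the casts are omitted and the call itself is returned.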
/// Return a vector value that contains \arg V broadcasted to \p
/// NumElts elements.
Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {


@ -728,6 +728,11 @@ def int_launder_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
[IntrInaccessibleMemOnly, IntrSpeculatable]>;
def int_strip_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
[IntrSpeculatable, IntrNoMem]>;
//===------------------------ Stackmap Intrinsics -------------------------===//
//
def int_experimental_stackmap : Intrinsic<[],


@ -431,13 +431,15 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (!GEPOp) {
if (auto CS = ImmutableCallSite(V)) {
// Note: getArgumentAliasingToReturnedPointer keeps it in sync with
// CaptureTracking, which is needed for correctness. This is because
// some intrinsics like launder.invariant.group returns pointers that
// are aliasing it's argument, which is known to CaptureTracking.
// If AliasAnalysis does not use the same information, it could assume
// that pointer returned from launder does not alias it's argument
// because launder could not return it if the pointer was not captured.
// CaptureTracking can know about special capturing properties of some
// intrinsics like launder.invariant.group that can't be expressed with
// attributes, such as returning a pointer that aliases the argument.
// Because some analyses may assume that a nocapture pointer is never
// returned from such an intrinsic (since the function would otherwise have
// to be marked with the returned attribute), it is crucial to use this
// function, which is kept in sync with CaptureTracking. Not using it may
// cause subtle miscompilations where two aliasing pointers are assumed
// not to alias.
if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
V = RP;
continue;

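As an illustration of the aliasing property described above (a minimal
sketch; the function is hypothetical):

define void @example(i8* %p) {
  ; %q aliases %p even though %p is not otherwise captured; assuming the
  ; two stores below touch distinct memory would miscompile code that
  ; reads the value back through %p.
  %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
  store i8 1, i8* %p
  store i8 2, i8* %q
  ret void
}

declare i8* @llvm.launder.invariant.group.p0i8(i8*)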

@ -1393,6 +1393,7 @@ bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
case Intrinsic::fmuladd:
case Intrinsic::copysign:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::round:
case Intrinsic::masked_load:
case Intrinsic::sadd_with_overflow:
@ -1596,14 +1597,16 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
return Constant::getNullValue(Ty);
if (IntrinsicID == Intrinsic::bswap ||
IntrinsicID == Intrinsic::bitreverse ||
IntrinsicID == Intrinsic::launder_invariant_group)
IntrinsicID == Intrinsic::launder_invariant_group ||
IntrinsicID == Intrinsic::strip_invariant_group)
return Operands[0];
}
if (isa<ConstantPointerNull>(Operands[0]) &&
Operands[0]->getType()->getPointerAddressSpace() == 0) {
// launder(null) == null iff in addrspace 0
if (IntrinsicID == Intrinsic::launder_invariant_group)
// launder(null) == null == strip(null) iff in addrspace 0
if (IntrinsicID == Intrinsic::launder_invariant_group ||
IntrinsicID == Intrinsic::strip_invariant_group)
return Operands[0];
return nullptr;
}


@ -3404,8 +3404,9 @@ const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
}
bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
ImmutableCallSite CS) {
return CS.getIntrinsicID() == Intrinsic::launder_invariant_group;
ImmutableCallSite CS) {
return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
}
/// \p PN defines a loop-variant pointer to an object. Check if the
@ -3454,13 +3455,15 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
return V;
} else {
if (auto CS = CallSite(V)) {
// Note: getArgumentAliasingToReturnedPointer keeps it in sync with
// CaptureTracking, which is needed for correctness. This is because
// some intrinsics like launder.invariant.group returns pointers that
// are aliasing it's argument, which is known to CaptureTracking.
// If AliasAnalysis does not use the same information, it could assume
// that pointer returned from launder does not alias it's argument
// because launder could not return it if the pointer was not captured.
// CaptureTracking can know about special capturing properties of some
// intrinsics like launder.invariant.group that can't be expressed with
// attributes, such as returning a pointer that aliases the argument.
// Because some analyses may assume that a nocapture pointer is never
// returned from such an intrinsic (since the function would otherwise have
// to be marked with the returned attribute), it is crucial to use this
// function, which is kept in sync with CaptureTracking. Not using it may
// cause subtle miscompilations where two aliasing pointers are assumed
// not to alias.
if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
V = RP;
continue;


@ -1702,6 +1702,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
return true;
}
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
II->replaceAllUsesWith(II->getArgOperand(0));
II->eraseFromParent();
return true;


@ -1437,6 +1437,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
return true;
}
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)


@ -5768,6 +5768,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;


@ -521,7 +521,8 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
// but it can't be marked with the returned attribute, which is why it
// needs a special case.
if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
CS.getIntrinsicID() == Intrinsic::launder_invariant_group) {
(CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
CS.getIntrinsicID() == Intrinsic::strip_invariant_group)) {
V = CS.getArgOperand(0);
continue;
}


@ -457,6 +457,7 @@ static bool isCallPromotable(CallInst *CI) {
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
case Intrinsic::objectsize:
return true;
default:
@ -882,6 +883,7 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::launder_invariant_group:
case Intrinsic::strip_invariant_group:
Intr->eraseFromParent();
// FIXME: I think the invariant marker should still theoretically apply,
// but the intrinsics need to be changed to accept pointers with any


@ -1,7 +1,7 @@
; RUN: opt -S -instsimplify -instcombine < %s | FileCheck %s
; CHECK-LABEL: define void @checkNonnull()
define void @checkNonnull() {
; CHECK-LABEL: define void @checkNonnullLaunder()
define void @checkNonnullLaunder() {
; CHECK: %p = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %p)
; CHECK: call void @use(i8* nonnull %p2)
@ -15,5 +15,22 @@ entry:
ret void
}
; CHECK-LABEL: define void @checkNonnullStrip()
define void @checkNonnullStrip() {
; CHECK: %p = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
; CHECK: %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %p)
; CHECK: call void @use(i8* nonnull %p2)
entry:
%0 = alloca i8, align 8
%p = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
%p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
call void @use(i8* %p2)
ret void
}
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)
declare void @use(i8*)


@ -41,11 +41,19 @@ define double @test_cos(float %F) {
declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
define i8* @launder(i8* %p) {
%q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
declare i8* @llvm.strip.invariant.group(i8*)
define i8* @strip(i8* %p) {
%q = call i8* @llvm.strip.invariant.group(i8* %p)
ret i8* %q
}
; sideeffect
declare void @llvm.sideeffect()


@ -77,8 +77,14 @@ define i8 @unoptimizable2() {
define void @dontProveEquality(i8* %a) {
%b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
%r = icmp eq i8* %b, %a
;CHECK: call void @useBool(i1 %r)
; CHECK: call void @useBool(i1 %r)
call void @useBool(i1 %r)
%b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
%r2 = icmp eq i8* %b2, %a
; CHECK: call void @useBool(i1 %r2)
call void @useBool(i1 %r2)
ret void
}
@ -90,5 +96,9 @@ declare void @clobber(i8*)
; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{}
; CHECK: Function Attrs: nounwind readnone speculatable{{$}}
; CHECK-NEXT: declare i8* @llvm.strip.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)
!0 = !{}


@ -7,8 +7,8 @@ define void @foo() {
enter:
; CHECK-NOT: !invariant.group
; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp, !tbaa
%val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
; CHECK: %val = load i8, i8* @tmp{{$}}
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp{{$}}
@ -18,7 +18,23 @@ enter:
}
; CHECK-LABEL: }
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; CHECK-LABEL: define void @foo2() {
define void @foo2() {
enter:
; CHECK-NOT: !invariant.group
; CHECK-NOT: @llvm.strip.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp{{$}}
%val = load i8, i8* @tmp, !invariant.group !0
%ptr = call i8* @llvm.strip.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp{{$}}
store i8 42, i8* %ptr, !invariant.group !0
ret void
}
; CHECK-LABEL: }
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)
!0 = !{}
!1 = !{!"x", !0}


@ -27,4 +27,39 @@ define void @skip2Barriers(i8* %ptr) {
ret void
}
; CHECK-LABEL: void @skip3Barriers(i8* %ptr)
define void @skip3Barriers(i8* %ptr) {
; CHECK-NOT: store i8 42
store i8 42, i8* %ptr
; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
%ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: store i8 43
store i8 43, i8* %ptr2
%ptr3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr2)
%ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
; CHECK: store i8 44
store i8 44, i8* %ptr4
ret void
}
; CHECK-LABEL: void @skip4Barriers(i8* %ptr)
define void @skip4Barriers(i8* %ptr) {
; CHECK-NOT: store i8 42
store i8 42, i8* %ptr
; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
%ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: store i8 43
store i8 43, i8* %ptr2
%ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr2)
%ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
%ptr5 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr3)
; CHECK: store i8 44
store i8 44, i8* %ptr5
ret void
}
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)


@ -237,4 +237,21 @@ define void @captureLaunder(i8* %p) {
ret void
}
; CHECK: @nocaptureStrip(i8* nocapture %p)
define void @nocaptureStrip(i8* %p) {
entry:
%b = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
store i8 42, i8* %b
ret void
}
@g3 = global i8* null
; CHECK: define void @captureStrip(i8* %p)
define void @captureStrip(i8* %p) {
%b = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
store i8* %b, i8** @g3
ret void
}
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)


@ -51,6 +51,18 @@ entry:
ret i8 %b
}
; CHECK-LABEL: define i1 @proveEqualityForStrip(
define i1 @proveEqualityForStrip(i8* %a) {
; FIXME: The first call could also be removed by GVN. Right now
; DCE removes it. The second call is CSE'd with the first one.
; CHECK: %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
%b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
; CHECK-NOT: llvm.strip.invariant.group
%b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
%r = icmp eq i8* %b1, %b2
; CHECK: ret i1 true
ret i1 %r
}
; CHECK-LABEL: define i8 @unoptimizable1() {
define i8 @unoptimizable1() {
entry:
@ -437,10 +449,10 @@ declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0
declare i8* @llvm.strip.invariant.group.p0i8(i8*)
declare void @llvm.assume(i1 %cmp.vtables)
attributes #0 = { nounwind }
!0 = !{}


@ -27,15 +27,15 @@ enter:
define void @_optimizable() {
enter:
%valptr = alloca i32
%val = call i32 @TheAnswerToLifeTheUniverseAndEverything()
store i32 %val, i32* @tmp
store i32 %val, i32* %valptr
%0 = bitcast i32* %valptr to i8*
%barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
%val2 = load i32, i32* %1
store i32 %val2, i32* @tmp2
ret void
@ -43,30 +43,30 @@ enter:
; We can't step through launder.invariant.group here, because that would change
; this load in @usage_of_globals()
; val = load i32, i32* %ptrVal, !invariant.group !0
; into
; %val = load i32, i32* @tmp3, !invariant.group !0
; and then we could assume that %val and %val2 are the same, which could be
; false, because @changeTmp3ValAndCallBarrierInside() may change the value
; of @tmp3.
define void @_not_optimizable() {
enter:
store i32 13, i32* @tmp3, !invariant.group !0
%0 = bitcast i32* @tmp3 to i8*
%barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
store i32* %1, i32** @ptrToTmp3
store i32 42, i32* %1, !invariant.group !0
ret void
}
define void @usage_of_globals() {
entry:
%ptrVal = load i32*, i32** @ptrToTmp3
%val = load i32, i32* %ptrVal, !invariant.group !0
call void @changeTmp3ValAndCallBarrierInside()
%val2 = load i32, i32* @tmp3, !invariant.group !0
ret void;


@ -1,5 +1,6 @@
; RUN: opt -instcombine -S < %s | FileCheck %s
; CHECK-LABEL: define i8* @simplifyNullLaunder()
define i8* @simplifyNullLaunder() {
; CHECK-NEXT: ret i8* null
@ -29,6 +30,39 @@ define i8 addrspace(42)* @simplifyUndefLaunder2() {
ret i8 addrspace(42)* %b2
}
declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)*)
; CHECK-LABEL: define i8* @simplifyNullStrip()
define i8* @simplifyNullStrip() {
; CHECK-NEXT: ret i8* null
%b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
ret i8* %b2
}
; CHECK-LABEL: define i8 addrspace(42)* @dontsimplifyNullStripForDifferentAddrspace()
define i8 addrspace(42)* @dontsimplifyNullStripForDifferentAddrspace() {
; CHECK: %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
; CHECK: ret i8 addrspace(42)* %b2
%b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
ret i8 addrspace(42)* %b2
}
; CHECK-LABEL: define i8* @simplifyUndefStrip()
define i8* @simplifyUndefStrip() {
; CHECK-NEXT: ret i8* undef
%b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* undef)
ret i8* %b2
}
; CHECK-LABEL: define i8 addrspace(42)* @simplifyUndefStrip2()
define i8 addrspace(42)* @simplifyUndefStrip2() {
; CHECK-NEXT: ret i8 addrspace(42)* undef
%b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* undef)
ret i8 addrspace(42)* %b2
}
declare i8* @llvm.strip.invariant.group.p0i8(i8*)
declare i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)*)


@ -52,6 +52,19 @@ entry:
ret i8 %b
}
; CHECK-LABEL: define i1 @proveEqualityForStrip(
define i1 @proveEqualityForStrip(i8* %a) {
; FIXME: The first call could also be removed by GVN. Right now
; DCE removes it. The second call is CSE'd with the first one.
; CHECK: %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
%b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
; CHECK-NOT: llvm.strip.invariant.group
%b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
%r = icmp eq i8* %b1, %b2
; CHECK: ret i1 true
ret i1 %r
}
; CHECK-LABEL: define i8 @unoptimizable1() {
define i8 @unoptimizable1() {
entry: