//===- ScalarEvolutionsTest.cpp - ScalarEvolution unit tests --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

// Test fixture that owns the analyses (AssumptionCache, DominatorTree,
// LoopInfo) a ScalarEvolution instance depends on, so they stay alive for as
// long as the ScalarEvolution objects the individual tests build.
class ScalarEvolutionsTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}
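
  // Build a fresh ScalarEvolution for F. The supporting analyses are
  // reconstructed here and owned by the fixture, so the returned
  // ScalarEvolution stays usable for the remainder of the test.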
  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }
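
  // Forwarding helpers so that individual tests can exercise these
  // ScalarEvolution member functions through the fixture.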
  static Optional<APInt> computeConstantDifference(ScalarEvolution &SE,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
    return SE.computeConstantDifference(LHS, RHS);
  }

  static bool matchURem(ScalarEvolution &SE, const SCEV *Expr, const SCEV *&LHS,
                        const SCEV *&RHS) {
    return SE.matchURem(Expr, LHS, RHS);
  }

  static bool isImpliedCond(
      ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS,
      const SCEV *RHS, ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
      const SCEV *FoundRHS) {
    return SE.isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
  }
};
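
// SCEVUnknown nodes track their underlying Value with a value handle, so a
// replaceAllUsesWith on that Value should be reflected in SCEV expressions
// that were built before the replacement.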
TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
  ReturnInst::Create(Context, nullptr, BB);

  Type *Ty = Type::getInt1Ty(Context);
  Constant *Init = Constant::getNullValue(Ty);
  Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V0");
  Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V1");
  Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
                                 Init, "V2");

  ScalarEvolution SE = buildSE(*F);

  const SCEV *S0 = SE.getSCEV(V0);
  const SCEV *S1 = SE.getSCEV(V1);
  const SCEV *S2 = SE.getSCEV(V2);

  const SCEV *P0 = SE.getAddExpr(S0, S0);
  const SCEV *P1 = SE.getAddExpr(S1, S1);
  const SCEV *P2 = SE.getAddExpr(S2, S2);

  const SCEVMulExpr *M0 = cast<SCEVMulExpr>(P0);
  const SCEVMulExpr *M1 = cast<SCEVMulExpr>(P1);
  const SCEVMulExpr *M2 = cast<SCEVMulExpr>(P2);

  EXPECT_EQ(cast<SCEVConstant>(M0->getOperand(0))->getValue()->getZExtValue(),
            2u);
  EXPECT_EQ(cast<SCEVConstant>(M1->getOperand(0))->getValue()->getZExtValue(),
            2u);
  EXPECT_EQ(cast<SCEVConstant>(M2->getOperand(0))->getValue()->getZExtValue(),
            2u);

  // Before the RAUWs, these are all pointing to separate values.
  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V1);
  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V2);

  // Do some RAUWs.
  V2->replaceAllUsesWith(V1);
  V1->replaceAllUsesWith(V0);

  // After the RAUWs, these should all be pointing to V0.
  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V0);
  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V0);
}

TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);
  ReturnInst::Create(Context, nullptr, ExitBB);
  auto *Ty = Type::getInt32Ty(Context);
  auto *PN = PHINode::Create(Ty, 2, "", &*LoopBB->begin());
  PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
  PN->addIncoming(UndefValue::get(Ty), LoopBB);
  ScalarEvolution SE = buildSE(*F);
  auto *S1 = SE.getSCEV(PN);
  auto *S2 = SE.getSCEV(PN);
  auto *ZeroConst = SE.getConstant(Ty, 0);

  // At some point, only the first call to getSCEV returned the simplified
  // SCEVConstant and later calls just returned a SCEVUnknown referencing the
  // PHI node.
  EXPECT_EQ(S1, ZeroConst);
  EXPECT_EQ(S1, S2);
}

static Instruction *getInstructionByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return &I;
  llvm_unreachable("Expected to find instruction!");
}

static Value *getArgByName(Function &F, StringRef Name) {
  for (auto &Arg : F.args())
    if (Arg.getName() == Name)
      return &Arg;
  llvm_unreachable("Expected to find argument!");
}

TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      " local_unnamed_addr { "
      "entry: "
      " %entrycond = icmp sgt i32 %n, 0 "
      " br i1 %entrycond, label %loop.ph, label %for.end "
      " "
      "loop.ph: "
      " %a = load i32, i32* %A, align 4 "
      " %b = load i32, i32* %B, align 4 "
      " %mul = mul nsw i32 %b, %a "
      " %iv0.init = getelementptr inbounds i8, i8* %arr, i32 %mul "
      " br label %loop "
      " "
      "loop: "
      " %iv0 = phi i8* [ %iv0.inc, %loop ], [ %iv0.init, %loop.ph ] "
      " %iv1 = phi i32 [ %iv1.inc, %loop ], [ 0, %loop.ph ] "
      " %conv = trunc i32 %iv1 to i8 "
      " store i8 %conv, i8* %iv0, align 1 "
      " %iv0.inc = getelementptr inbounds i8, i8* %iv0, i32 %b "
      " %iv1.inc = add nuw nsw i32 %iv1, 1 "
      " %exitcond = icmp eq i32 %iv1.inc, %n "
      " br i1 %exitcond, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      " br label %for.end "
      " "
      "for.end: "
      " ret void "
      "} "
      " "
      "define void @f_2(i32* %X, i32* %Y, i32* %Z) { "
      " %x = load i32, i32* %X "
      " %y = load i32, i32* %Y "
      " %z = load i32, i32* %Z "
      " ret void "
      "} "
      " "
      "define void @f_3() { "
      " %x = load i32, i32* @var_0"
      " %y = load i32, i32* @var_1"
      " %z = load i32, i32* @var_2"
      " ret void"
      "} "
      " "
      "define void @f_4(i32 %a, i32 %b, i32 %c) { "
      " %x = call i32 @unknown(i32 %a, i32 %b, i32 %c)"
      " %y = call i32 @unknown(i32 %b, i32 %c, i32 %a)"
      " %z = call i32 @unknown(i32 %c, i32 %a, i32 %b)"
      " ret void"
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *IV0 = getInstructionByName(F, "iv0");
    auto *IV0Inc = getInstructionByName(F, "iv0.inc");

    auto *FirstExprForIV0 = SE.getSCEV(IV0);
    auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
    auto *SecondExprForIV0 = SE.getSCEV(IV0);

    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
    EXPECT_TRUE(isa<SCEVAddRecExpr>(SecondExprForIV0));
  });
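
  // getMulExpr is expected to canonicalize operand order, so multiplying the
  // same operands in any permutation should return the identical SCEV node.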
  auto CheckCommutativeMulExprs = [&](ScalarEvolution &SE, const SCEV *A,
                                      const SCEV *B, const SCEV *C) {
    EXPECT_EQ(SE.getMulExpr(A, B), SE.getMulExpr(B, A));
    EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
    EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));

    SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
    SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
    SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
    SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
    SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
    SmallVector<const SCEV *, 3> Ops5 = {C, A, B};

    auto *Mul0 = SE.getMulExpr(Ops0);
    auto *Mul1 = SE.getMulExpr(Ops1);
    auto *Mul2 = SE.getMulExpr(Ops2);
    auto *Mul3 = SE.getMulExpr(Ops3);
    auto *Mul4 = SE.getMulExpr(Ops4);
    auto *Mul5 = SE.getMulExpr(Ops5);

    EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
    EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
    EXPECT_EQ(Mul2, Mul3) << "Expected " << *Mul2 << " == " << *Mul3;
    EXPECT_EQ(Mul3, Mul4) << "Expected " << *Mul3 << " == " << *Mul4;
    EXPECT_EQ(Mul4, Mul5) << "Expected " << *Mul4 << " == " << *Mul5;
  };

  for (StringRef FuncName : {"f_2", "f_3", "f_4"})
    runWithSE(
        *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
          CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
                                   SE.getSCEV(getInstructionByName(F, "y")),
                                   SE.getSCEV(getInstructionByName(F, "z")));
        });
}
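
// Compile-time stress test: build a large web of mutually recursive PHIs and
// multiplies and make sure SCEV construction (and its operand sorting via
// CompareSCEVComplexity) still manages to produce an expression.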
TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "bb1", F);
  BranchInst::Create(LoopBB, EntryBB);

  auto *Ty = Type::getInt32Ty(Context);
  SmallVector<Instruction*, 8> Muls(8), Acc(8), NextAcc(8);

  Acc[0] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[1] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[2] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[3] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[4] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[5] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[6] = PHINode::Create(Ty, 2, "", LoopBB);
  Acc[7] = PHINode::Create(Ty, 2, "", LoopBB);

  for (int i = 0; i < 20; i++) {
    Muls[0] = BinaryOperator::CreateMul(Acc[0], Acc[0], "", LoopBB);
    NextAcc[0] = BinaryOperator::CreateAdd(Muls[0], Acc[4], "", LoopBB);
    Muls[1] = BinaryOperator::CreateMul(Acc[1], Acc[1], "", LoopBB);
    NextAcc[1] = BinaryOperator::CreateAdd(Muls[1], Acc[5], "", LoopBB);
    Muls[2] = BinaryOperator::CreateMul(Acc[2], Acc[2], "", LoopBB);
    NextAcc[2] = BinaryOperator::CreateAdd(Muls[2], Acc[6], "", LoopBB);
    Muls[3] = BinaryOperator::CreateMul(Acc[3], Acc[3], "", LoopBB);
    NextAcc[3] = BinaryOperator::CreateAdd(Muls[3], Acc[7], "", LoopBB);

    Muls[4] = BinaryOperator::CreateMul(Acc[4], Acc[4], "", LoopBB);
    NextAcc[4] = BinaryOperator::CreateAdd(Muls[4], Acc[0], "", LoopBB);
    Muls[5] = BinaryOperator::CreateMul(Acc[5], Acc[5], "", LoopBB);
    NextAcc[5] = BinaryOperator::CreateAdd(Muls[5], Acc[1], "", LoopBB);
    Muls[6] = BinaryOperator::CreateMul(Acc[6], Acc[6], "", LoopBB);
    NextAcc[6] = BinaryOperator::CreateAdd(Muls[6], Acc[2], "", LoopBB);
    Muls[7] = BinaryOperator::CreateMul(Acc[7], Acc[7], "", LoopBB);
    NextAcc[7] = BinaryOperator::CreateAdd(Muls[7], Acc[3], "", LoopBB);
    Acc = NextAcc;
  }

  auto II = LoopBB->begin();
  for (int i = 0; i < 8; i++) {
    PHINode *Phi = cast<PHINode>(&*II++);
    Phi->addIncoming(Acc[i], LoopBB);
    Phi->addIncoming(UndefValue::get(Ty), EntryBB);
  }

  BasicBlock *ExitBB = BasicBlock::Create(Context, "bb2", F);
  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
                     LoopBB);

  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
  Acc[2] = BinaryOperator::CreateAdd(Acc[4], Acc[5], "", ExitBB);
  Acc[3] = BinaryOperator::CreateAdd(Acc[6], Acc[7], "", ExitBB);
  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);

  ReturnInst::Create(Context, nullptr, ExitBB);

  ScalarEvolution SE = buildSE(*F);

  EXPECT_NE(nullptr, SE.getSCEV(Acc[0]));
}

TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(Context);
  PointerType *IntPtrPtrTy = IntPtrTy->getPointerTo();

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {IntPtrTy, IntPtrTy}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  Value *X = &*F->arg_begin();
  Value *Y = &*std::next(F->arg_begin());

  const int ValueDepth = 10;
  for (int i = 0; i < ValueDepth; i++) {
    X = new LoadInst(IntPtrTy, new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB),
                     "",
                     /*isVolatile*/ false, EntryBB);
    Y = new LoadInst(IntPtrTy, new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB),
                     "",
                     /*isVolatile*/ false, EntryBB);
  }

  auto *MulA = BinaryOperator::CreateMul(X, Y, "", EntryBB);
  auto *MulB = BinaryOperator::CreateMul(Y, X, "", EntryBB);
  ReturnInst::Create(Context, nullptr, EntryBB);

  // This test isn't checking for correctness. Today making A and B resolve to
  // the same SCEV would require deeper searching in CompareValueComplexity,
  // which will slow down compilation. However, this test can fail (with LLVM's
  // behavior still being correct) if we ever have a smarter
  // CompareValueComplexity that is both fast and more accurate.

  ScalarEvolution SE = buildSE(*F);
  auto *A = SE.getSCEV(MulA);
  auto *B = SE.getSCEV(MulB);
  EXPECT_NE(A, B);
}

TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
  Type *Ty32 = Type::getInt32Ty(Context);
  Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);

  Argument *A1 = &*F->arg_begin();
  Argument *A2 = &*(std::next(F->arg_begin()));
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);

  Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
  Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
  Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
  Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
  Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  // FIXME: The size of this is arbitrary and doesn't seem to change the
  // result, but SCEV will do quadratic work for these so a large number here
  // will be extremely slow. We should revisit what and how this is testing
  // SCEV.
  for (int i = 0; i < 10; i++) {
    Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
    Add1 = Add2;
    Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
  }

  ReturnInst::Create(Context, nullptr, EntryBB);
  ScalarEvolution SE = buildSE(*F);
  EXPECT_NE(nullptr, SE.getSCEV(Mul1));
}

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instruction!");
}

TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
  LLVMContext C;
  SMDiagnostic Err;
  std::unique_ptr<Module> M = parseAssemblyString(
      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
      " "
      "@var_0 = external global i32, align 4"
      "@var_1 = external global i32, align 4"
      "@var_2 = external global i32, align 4"
      " "
      "declare i32 @unknown(i32, i32, i32)"
      " "
      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
      " local_unnamed_addr { "
      "entry: "
      " br label %loop.ph "
      " "
      "loop.ph: "
      " br label %loop "
      " "
      "loop: "
      " %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
      " %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
      " %iv0.inc = add i32 %iv0, 1 "
      " %iv1.inc = add i32 %iv1, 3 "
      " br i1 undef, label %for.end.loopexit, label %loop "
      " "
      "for.end.loopexit: "
      " ret void "
      "} "
      " "
      "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
      " local_unnamed_addr { "
      "entry: "
      " br label %loop_0 "
      " "
      "loop_0: "
      " br i1 undef, label %loop_0, label %loop_1 "
      " "
      "loop_1: "
      " br i1 undef, label %loop_2, label %loop_1 "
      " "
      " "
      "loop_2: "
      " br i1 undef, label %end, label %loop_2 "
      " "
      "end: "
      " ret void "
      "} "
      ,
      Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto &I0 = GetInstByName(F, "iv0");
    auto &I1 = *I0.getNextNode();

    auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
    PostIncLoopSet Loops;
    Loops.insert(S0->getLoop());
    auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
    auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
    EXPECT_EQ(S0, D0) << *S0 << " " << *D0;

    auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
    Loops.clear();
    Loops.insert(S1->getLoop());
    auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
    auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
    EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
  });

  runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto *L2 = *LI.begin();
    auto *L1 = *std::next(LI.begin());
    auto *L0 = *std::next(LI.begin(), 2);
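
    // Convenience wrappers that build add-recurrences over a given loop and
    // plain add expressions from initializer lists.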
    auto GetAddRec = [&SE](const Loop *L,
                           std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
    };

    auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
      SmallVector<const SCEV *, 4> OpsCopy(Ops);
      return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
    };

    // We first populate the Exprs vector with a few "interesting" SCEV
    // expressions, and then we go through the list and assert that each
    // expression in it has an invertible normalization.

    std::vector<const SCEV *> Exprs;
    {
      const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
      const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
      const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
      const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));

      Exprs.push_back(GetAddRec(L0, {V0}));             // 0
      Exprs.push_back(GetAddRec(L0, {V0, V1}));         // 1
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2}));     // 2
      Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3

      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
      Exprs.push_back(
          GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6

      Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8

      Exprs.push_back(
          GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
    }
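
    // Enumerate every subset of {L0, L1, L2} as a candidate post-increment
    // loop set.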
    std::vector<PostIncLoopSet> LoopSets;
    for (int i = 0; i < 8; i++) {
      LoopSets.emplace_back();
      if (i & 1)
        LoopSets.back().insert(L0);
      if (i & 2)
        LoopSets.back().insert(L1);
      if (i & 4)
        LoopSets.back().insert(L2);
    }

    for (const auto &LoopSet : LoopSets)
      for (auto *S : Exprs) {
        {
          auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
          auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);

          // Normalizing and then denormalizing should give us back the same
          // value.
          EXPECT_EQ(S, D) << "S = " << *S << " D = " << *D << " N = " << *N;
        }
        {
          auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
          auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);

          // Denormalizing and then normalizing should give us back the same
          // value.
          EXPECT_EQ(S, N) << "S = " << *S << " N = " << *N;
        }
      }
  });
}
|
|
|
|
|
2017-04-17 22:40:05 +02:00
|
|
|
// Expect that the call to getZeroExtendExpr does not take exponential time.
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
|
|
|
|
// Generate a function like below:
|
|
|
|
// define void @foo() {
|
|
|
|
// entry:
|
|
|
|
// br label %for.cond
|
|
|
|
//
|
|
|
|
// for.cond:
|
|
|
|
// %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ]
|
|
|
|
// %cmp = icmp sgt i64 %0, 90
|
|
|
|
// br i1 %cmp, label %for.inc, label %for.cond1
|
|
|
|
//
|
|
|
|
// for.inc:
|
|
|
|
// %dec = add nsw i64 %0, -1
|
|
|
|
// br label %for.cond
|
|
|
|
//
|
|
|
|
// for.cond1:
|
|
|
|
// %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ]
|
|
|
|
// %cmp3 = icmp sgt i64 %1, 90
|
|
|
|
// br i1 %cmp3, label %for.inc2, label %for.cond4
|
|
|
|
//
|
|
|
|
// for.inc2:
|
|
|
|
// %dec5 = add nsw i64 %1, -1
|
|
|
|
// br label %for.cond1
|
|
|
|
//
|
|
|
|
// ......
|
|
|
|
//
|
|
|
|
// for.cond89:
|
|
|
|
// %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ]
|
|
|
|
// %cmp93 = icmp sgt i64 %19, 90
|
|
|
|
// br i1 %cmp93, label %for.inc92, label %for.end
|
|
|
|
//
|
|
|
|
// for.inc92:
|
|
|
|
// %dec94 = add nsw i64 %19, -1
|
|
|
|
// br label %for.cond89
|
|
|
|
//
|
|
|
|
// for.end:
|
|
|
|
// %gep = getelementptr i8, i8* null, i64 %dec
|
|
|
|
// %gep6 = getelementptr i8, i8* %gep, i64 %dec5
|
|
|
|
// ......
|
|
|
|
// %gep95 = getelementptr i8, i8* %gep91, i64 %dec94
|
|
|
|
// ret void
|
|
|
|
// }
|
|
|
|
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
|
|
|
|
Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", M);
|
|
|
|
|
|
|
|
BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
|
|
|
|
BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F);
|
|
|
|
BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F);
|
|
|
|
BranchInst::Create(CondBB, EntryBB);
|
|
|
|
BasicBlock *PrevBB = EntryBB;
|
|
|
|
|
|
|
|
Type *I64Ty = Type::getInt64Ty(Context);
|
|
|
|
Type *I8Ty = Type::getInt8Ty(Context);
|
|
|
|
Type *I8PtrTy = Type::getInt8PtrTy(Context);
|
|
|
|
Value *Accum = Constant::getNullValue(I8PtrTy);
|
|
|
|
int Iters = 20;
|
|
|
|
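// Each iteration appends another "for.cond"/"for.inc" pair and chains a GEP in
// for.end onto the loop's PHI, building a deeply nested SCEV for Accum.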
for (int i = 0; i < Iters; i++) {
|
|
|
|
BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB);
|
|
|
|
auto *PN = PHINode::Create(I64Ty, 2, "", CondBB);
|
|
|
|
PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB);
|
|
|
|
auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN,
|
|
|
|
ConstantInt::get(Context, APInt(64, 90)), "cmp",
|
|
|
|
CondBB);
|
|
|
|
BasicBlock *NextBB;
|
|
|
|
if (i != Iters - 1)
|
|
|
|
NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB);
|
|
|
|
else
|
|
|
|
NextBB = EndBB;
|
|
|
|
BranchInst::Create(IncBB, NextBB, Cmp, CondBB);
|
|
|
|
auto *Dec = BinaryOperator::CreateNSWAdd(
|
|
|
|
PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB);
|
|
|
|
PN->addIncoming(Dec, IncBB);
|
|
|
|
BranchInst::Create(CondBB, IncBB);
|
|
|
|
|
|
|
|
Accum = GetElementPtrInst::Create(I8Ty, Accum, PN, "gep", EndBB);
|
|
|
|
|
|
|
|
PrevBB = CondBB;
|
|
|
|
CondBB = NextBB;
|
|
|
|
}
|
|
|
|
ReturnInst::Create(Context, nullptr, EndBB);
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
const SCEV *S = SE.getSCEV(Accum);
|
|
|
|
Type *I128Ty = Type::getInt128Ty(Context);
|
|
|
|
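// Zero-extending the deeply nested expression should finish quickly rather
// than exploding combinatorially.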
SE.getZeroExtendExpr(S, I128Ty);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that SCEV invalidates exit limits after invalidating the values it
|
|
|
|
// depends on when we forget a loop.
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
|
|
|
|
/*
|
|
|
|
* Create the following code:
|
|
|
|
* func(i64 addrspace(10)* %arg)
|
|
|
|
* top:
|
|
|
|
* br label %L.ph
|
|
|
|
* L.ph:
|
|
|
|
* br label %L
|
|
|
|
* L:
|
|
|
|
* %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
* %add = add i64 %phi, 1
* %cond = icmp slt i64 %add, 1000; then becomes 2000.
* br i1 %cond, label %L, label %post
|
|
|
|
* post:
|
|
|
|
* ret void
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
// Create a module with non-integral pointers in its datalayout
|
|
|
|
Module NIM("nonintegral", Context);
|
|
|
|
std::string DataLayout = M.getDataLayoutStr();
|
|
|
|
if (!DataLayout.empty())
|
|
|
|
DataLayout += "-";
|
|
|
|
DataLayout += "ni:10";
|
|
|
|
NIM.setDataLayout(DataLayout);
|
|
|
|
|
|
|
|
Type *T_int64 = Type::getInt64Ty(Context);
|
|
|
|
Type *T_pint64 = T_int64->getPointerTo(10);
|
|
|
|
|
|
|
|
FunctionType *FTy =
|
|
|
|
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
|
|
|
|
Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
|
|
|
|
|
|
|
|
BasicBlock *Top = BasicBlock::Create(Context, "top", F);
|
|
|
|
BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
|
|
|
|
BasicBlock *L = BasicBlock::Create(Context, "L", F);
|
|
|
|
BasicBlock *Post = BasicBlock::Create(Context, "post", F);
|
|
|
|
|
|
|
|
IRBuilder<> Builder(Top);
|
|
|
|
Builder.CreateBr(LPh);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(LPh);
|
|
|
|
Builder.CreateBr(L);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(L);
|
|
|
|
PHINode *Phi = Builder.CreatePHI(T_int64, 2);
|
|
|
|
auto *Add = cast<Instruction>(
|
|
|
|
Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
|
|
|
|
auto *Limit = ConstantInt::get(T_int64, 1000);
|
|
|
|
auto *Cond = cast<Instruction>(
|
|
|
|
Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
|
|
|
|
auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
|
|
|
|
Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
|
|
|
|
Phi->addIncoming(Add, L);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(Post);
|
|
|
|
Builder.CreateRetVoid();
|
|
|
|
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
auto *Loop = LI->getLoopFor(L);
|
|
|
|
const SCEV *EC = SE.getBackedgeTakenCount(Loop);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
|
|
|
|
EXPECT_TRUE(isa<SCEVConstant>(EC));
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(EC)->getAPInt().getLimitedValue(), 999u);
|
|
|
|
|
|
|
|
// The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
|
|
|
|
// that is relevant to this test.
|
|
|
|
auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
|
|
|
|
auto *AR =
|
|
|
|
SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
|
|
|
|
const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
|
|
|
|
EXPECT_TRUE(isa<SCEVConstant>(ARAtLoopExit));
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(ARAtLoopExit)->getAPInt().getLimitedValue(),
|
|
|
|
1004u);
|
|
|
|
|
|
|
|
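// Replace the exit condition with a new bound; forgetLoop must drop the cached
// exit limits so that the new trip count is computed from scratch below.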
SE.forgetLoop(Loop);
|
|
|
|
Br->eraseFromParent();
|
|
|
|
Cond->eraseFromParent();
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(L);
|
|
|
|
auto *NewCond = Builder.CreateICmp(
|
|
|
|
ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
|
|
|
|
Builder.CreateCondBr(NewCond, L, Post);
|
|
|
|
const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
|
|
|
|
EXPECT_TRUE(isa<SCEVConstant>(NewEC));
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
|
|
|
|
const SCEV *NewARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewARAtLoopExit));
|
|
|
|
EXPECT_TRUE(isa<SCEVConstant>(NewARAtLoopExit));
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(NewARAtLoopExit)->getAPInt().getLimitedValue(),
|
|
|
|
2004u);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that SCEV invalidates exit limits after invalidating the values it
|
|
|
|
// depends on when we forget a value.
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
|
|
|
|
/*
|
|
|
|
* Create the following code:
|
|
|
|
* func(i64 addrspace(10)* %arg)
|
|
|
|
* top:
|
|
|
|
* br label %L.ph
|
|
|
|
* L.ph:
|
|
|
|
* %load = load i64 addrspace(10)* %arg
|
|
|
|
* br label %L
|
|
|
|
* L:
|
|
|
|
* %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
* %add = add i64 %phi, 1
* %cond = icmp slt i64 %add, %load ; then becomes 2000.
* br i1 %cond, label %L, label %post
|
|
|
|
* post:
|
|
|
|
* ret void
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
// Create a module with non-integral pointers in its datalayout
|
|
|
|
Module NIM("nonintegral", Context);
|
|
|
|
std::string DataLayout = M.getDataLayoutStr();
|
|
|
|
if (!DataLayout.empty())
|
|
|
|
DataLayout += "-";
|
|
|
|
DataLayout += "ni:10";
|
|
|
|
NIM.setDataLayout(DataLayout);
|
|
|
|
|
|
|
|
Type *T_int64 = Type::getInt64Ty(Context);
|
|
|
|
Type *T_pint64 = T_int64->getPointerTo(10);
|
|
|
|
|
|
|
|
FunctionType *FTy =
|
|
|
|
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
|
|
|
|
Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
|
|
|
|
|
|
|
|
Argument *Arg = &*F->arg_begin();
|
|
|
|
|
|
|
|
BasicBlock *Top = BasicBlock::Create(Context, "top", F);
|
|
|
|
BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
|
|
|
|
BasicBlock *L = BasicBlock::Create(Context, "L", F);
|
|
|
|
BasicBlock *Post = BasicBlock::Create(Context, "post", F);
|
|
|
|
|
|
|
|
IRBuilder<> Builder(Top);
|
|
|
|
Builder.CreateBr(LPh);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(LPh);
|
|
|
|
auto *Load = cast<Instruction>(Builder.CreateLoad(T_int64, Arg, "load"));
|
|
|
|
Builder.CreateBr(L);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(L);
|
|
|
|
PHINode *Phi = Builder.CreatePHI(T_int64, 2);
|
|
|
|
auto *Add = cast<Instruction>(
|
|
|
|
Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
|
|
|
|
auto *Cond = cast<Instruction>(
|
|
|
|
Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Load, "cond"));
|
|
|
|
auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
|
|
|
|
Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
|
|
|
|
Phi->addIncoming(Add, L);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(Post);
|
|
|
|
Builder.CreateRetVoid();
|
|
|
|
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
auto *Loop = LI->getLoopFor(L);
|
|
|
|
const SCEV *EC = SE.getBackedgeTakenCount(Loop);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
|
|
|
|
EXPECT_FALSE(isa<SCEVConstant>(EC));
|
|
|
|
|
|
|
|
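// Drop the load that provides the exit bound and substitute a constant bound;
// forgetValue must invalidate any cached exit limit that depended on the load.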
SE.forgetValue(Load);
|
|
|
|
Br->eraseFromParent();
|
|
|
|
Cond->eraseFromParent();
|
|
|
|
Load->eraseFromParent();
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(L);
|
|
|
|
auto *NewCond = Builder.CreateICmp(
|
|
|
|
ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
|
|
|
|
Builder.CreateCondBr(NewCond, L, Post);
|
|
|
|
const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
|
|
|
|
EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
|
|
|
|
EXPECT_TRUE(isa<SCEVConstant>(NewEC));
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstants) {
|
|
|
|
// Reference: https://reviews.llvm.org/D37265
|
|
|
|
// Make sure that SCEV does not blow up when constructing an AddRec
|
|
|
|
// with predicates for a phi with the update pattern:
|
|
|
|
// (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
|
|
|
|
// when either the initial value of the Phi or the InvariantAccum are
|
|
|
|
// constants that are too large to fit in an ix but are zero when truncated to
|
|
|
|
// ix.
|
|
|
|
FunctionType *FTy =
|
|
|
|
FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
|
|
|
|
Function *F =
|
|
|
|
Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Create IR:
|
|
|
|
entry:
|
|
|
|
br label %loop
|
|
|
|
loop:
|
|
|
|
%0 = phi i64 [-9223372036854775808, %entry], [%3, %loop]
|
|
|
|
%1 = shl i64 %0, 32
|
|
|
|
%2 = ashr exact i64 %1, 32
|
|
|
|
%3 = add i64 %2, -9223372036854775808
|
|
|
|
br i1 undef, label %exit, label %loop
|
|
|
|
exit:
|
|
|
|
ret void
|
|
|
|
*/
|
|
|
|
BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
|
|
|
|
BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
|
|
|
|
BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
|
|
|
|
|
|
|
|
// entry:
|
|
|
|
BranchInst::Create(LoopBB, EntryBB);
|
|
|
|
// loop:
|
|
|
|
auto *MinInt64 =
|
|
|
|
ConstantInt::get(Context, APInt(64, 0x8000000000000000U, true));
|
|
|
|
auto *Int64_32 = ConstantInt::get(Context, APInt(64, 32));
|
|
|
|
auto *Br = BranchInst::Create(
|
|
|
|
LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
|
|
|
|
auto *Phi = PHINode::Create(Type::getInt64Ty(Context), 2, "", Br);
|
|
|
|
auto *Shl = BinaryOperator::CreateShl(Phi, Int64_32, "", Br);
|
|
|
|
auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int64_32, "", Br);
|
|
|
|
auto *Add = BinaryOperator::CreateAdd(AShr, MinInt64, "", Br);
|
|
|
|
Phi->addIncoming(MinInt64, EntryBB);
|
|
|
|
Phi->addIncoming(Add, LoopBB);
|
|
|
|
// exit:
|
|
|
|
ReturnInst::Create(Context, nullptr, ExitBB);
|
|
|
|
|
|
|
|
// Make sure that SCEV doesn't blow up
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
SCEVUnionPredicate Preds;
|
|
|
|
const SCEV *Expr = SE.getSCEV(Phi);
|
|
|
|
EXPECT_NE(nullptr, Expr);
|
|
|
|
EXPECT_TRUE(isa<SCEVUnknown>(Expr));
|
|
|
|
auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstantAccum) {
|
|
|
|
// Make sure that SCEV does not blow up when constructing an AddRec
|
|
|
|
// with predicates for a phi with the update pattern:
|
|
|
|
// (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
|
|
|
|
// when the InvariantAccum is a constant that is too large to fit in an
|
|
|
|
// ix but is zero when truncated to ix, and the initial value of the
|
|
|
|
// phi is not a constant.
|
|
|
|
Type *Int32Ty = Type::getInt32Ty(Context);
|
|
|
|
SmallVector<Type *, 1> Types;
|
|
|
|
Types.push_back(Int32Ty);
|
|
|
|
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
|
|
|
|
Function *F =
|
|
|
|
Function::Create(FTy, Function::ExternalLinkage, "addrecphitest", M);
|
|
|
|
|
|
|
|
/*
|
|
|
|
Create IR:
|
|
|
|
define @addrecphitest(i32)
|
|
|
|
entry:
|
|
|
|
br label %loop
|
|
|
|
loop:
|
|
|
|
%1 = phi i32 [%0, %entry], [%4, %loop]
|
|
|
|
%2 = shl i32 %1, 16
|
|
|
|
%3 = ashr exact i32 %2, 16
|
|
|
|
%4 = add i32 %3, -2147483648
|
|
|
|
br i1 undef, label %exit, label %loop
|
|
|
|
exit:
|
|
|
|
ret void
|
|
|
|
*/
|
|
|
|
BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
|
|
|
|
BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
|
|
|
|
BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
|
|
|
|
|
|
|
|
// entry:
|
|
|
|
BranchInst::Create(LoopBB, EntryBB);
|
|
|
|
// loop:
|
|
|
|
auto *MinInt32 = ConstantInt::get(Context, APInt(32, 0x80000000U, true));
|
|
|
|
auto *Int32_16 = ConstantInt::get(Context, APInt(32, 16));
|
|
|
|
auto *Br = BranchInst::Create(
|
|
|
|
LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
|
|
|
|
auto *Phi = PHINode::Create(Int32Ty, 2, "", Br);
|
|
|
|
auto *Shl = BinaryOperator::CreateShl(Phi, Int32_16, "", Br);
|
|
|
|
auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int32_16, "", Br);
|
|
|
|
auto *Add = BinaryOperator::CreateAdd(AShr, MinInt32, "", Br);
|
|
|
|
auto *Arg = &*(F->arg_begin());
|
|
|
|
Phi->addIncoming(Arg, EntryBB);
|
|
|
|
Phi->addIncoming(Add, LoopBB);
|
|
|
|
// exit:
|
|
|
|
ReturnInst::Create(Context, nullptr, ExitBB);
|
|
|
|
|
|
|
|
// Make sure that SCEV doesn't blow up
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
SCEVUnionPredicate Preds;
|
|
|
|
const SCEV *Expr = SE.getSCEV(Phi);
|
|
|
|
EXPECT_NE(nullptr, Expr);
|
|
|
|
EXPECT_TRUE(isa<SCEVUnknown>(Expr));
|
|
|
|
auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
|
|
|
|
// Verify that the following SCEV gets folded to zero:
// (-1 * (trunc i64 (-1 * %0) to i32)) + (-1 * (trunc i64 %0 to i32))
|
|
|
|
Type *ArgTy = Type::getInt64Ty(Context);
|
|
|
|
Type *Int32Ty = Type::getInt32Ty(Context);
|
|
|
|
SmallVector<Type *, 1> Types;
|
|
|
|
Types.push_back(ArgTy);
|
|
|
|
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
|
|
|
|
Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
|
|
|
|
BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
|
|
|
|
ReturnInst::Create(Context, nullptr, BB);
|
|
|
|
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
|
|
|
|
auto *Arg = &*(F->arg_begin());
|
|
|
|
const auto *ArgSCEV = SE.getSCEV(Arg);
|
|
|
|
|
|
|
|
// Build the SCEV
|
|
|
|
const auto *A0 = SE.getNegativeSCEV(ArgSCEV);
|
|
|
|
const auto *A1 = SE.getTruncateExpr(A0, Int32Ty);
|
|
|
|
const auto *A = SE.getNegativeSCEV(A1);
|
|
|
|
|
|
|
|
const auto *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
|
|
|
|
const auto *B = SE.getNegativeSCEV(B0);
|
|
|
|
|
|
|
|
const auto *Expr = SE.getAddExpr(A, B);
|
|
|
|
// Verify that the SCEV was folded to 0
|
|
|
|
const auto *ZeroConst = SE.getConstant(Int32Ty, 0);
|
|
|
|
EXPECT_EQ(Expr, ZeroConst);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check logic of SCEV expression size computation.
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
|
|
|
|
/*
|
|
|
|
* Create the following code:
|
|
|
|
* void func(i64 %a, i64 %b)
|
|
|
|
* entry:
|
|
|
|
* %s1 = add i64 %a, 1
|
|
|
|
* %s2 = udiv i64 %s1, %b
|
|
|
|
* br label %exit
|
|
|
|
* exit:
|
|
|
|
* ret
|
|
|
|
*/
|
|
|
|
|
|
|
|
// Create a module.
|
|
|
|
Module M("SCEVComputeExpressionSize", Context);
|
|
|
|
|
|
|
|
Type *T_int64 = Type::getInt64Ty(Context);
|
|
|
|
|
|
|
|
FunctionType *FTy =
|
|
|
|
FunctionType::get(Type::getVoidTy(Context), { T_int64, T_int64 }, false);
|
|
|
|
Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
|
|
|
|
Argument *A = &*F->arg_begin();
|
|
|
|
Argument *B = &*std::next(F->arg_begin());
|
|
|
|
ConstantInt *C = ConstantInt::get(Context, APInt(64, 1));
|
|
|
|
|
|
|
|
BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
|
|
|
|
BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
|
|
|
|
|
|
|
|
IRBuilder<> Builder(Entry);
|
|
|
|
auto *S1 = cast<Instruction>(Builder.CreateAdd(A, C, "s1"));
|
|
|
|
auto *S2 = cast<Instruction>(Builder.CreateUDiv(S1, B, "s2"));
|
|
|
|
Builder.CreateBr(Exit);
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(Exit);
|
|
|
|
Builder.CreateRetVoid();
|
|
|
|
|
|
|
|
ScalarEvolution SE = buildSE(*F);
|
|
|
|
// Get S2 first to move it to cache.
|
|
|
|
const SCEV *AS = SE.getSCEV(A);
|
|
|
|
const SCEV *BS = SE.getSCEV(B);
|
|
|
|
const SCEV *CS = SE.getSCEV(C);
|
|
|
|
const SCEV *S1S = SE.getSCEV(S1);
|
|
|
|
const SCEV *S2S = SE.getSCEV(S2);
|
|
|
|
EXPECT_EQ(AS->getExpressionSize(), 1u);
|
|
|
|
EXPECT_EQ(BS->getExpressionSize(), 1u);
|
|
|
|
EXPECT_EQ(CS->getExpressionSize(), 1u);
|
|
|
|
EXPECT_EQ(S1S->getExpressionSize(), 3u);
|
|
|
|
EXPECT_EQ(S2S->getExpressionSize(), 5u);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVLoopDecIntrinsic) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %N) { "
|
|
|
|
"entry: "
|
|
|
|
" %cmp3 = icmp sgt i32 %N, 0 "
|
|
|
|
" br i1 %cmp3, label %for.body, label %for.cond.cleanup "
|
|
|
|
"for.cond.cleanup: "
|
|
|
|
" ret void "
|
|
|
|
"for.body: "
|
|
|
|
" %i.04 = phi i32 [ %inc, %for.body ], [ 100, %entry ] "
|
|
|
|
" %inc = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %i.04, i32 1) "
|
|
|
|
" %exitcond = icmp ne i32 %inc, 0 "
|
|
|
|
" br i1 %exitcond, label %for.cond.cleanup, label %for.body "
|
|
|
|
"} "
|
|
|
|
"declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *ScevInc = SE.getSCEV(getInstructionByName(F, "inc"));
|
|
|
|
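// The value produced by the loop-decrement intrinsic should still be
// recognized as an add recurrence.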
EXPECT_TRUE(isa<SCEVAddRecExpr>(ScevInc));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %sz, i32 %pp) { "
|
|
|
|
"entry: "
|
|
|
|
" %v0 = add i32 %pp, 0 "
|
|
|
|
" %v3 = add i32 %pp, 3 "
|
|
|
|
" br label %loop.body "
|
|
|
|
"loop.body: "
|
|
|
|
" %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
|
|
|
|
" %xa = add nsw i32 %iv, %v0 "
|
|
|
|
" %yy = add nsw i32 %iv, %v3 "
|
|
|
|
" %xb = sub nsw i32 %yy, 3 "
|
|
|
|
" %iv.next = add nsw i32 %iv, 1 "
|
|
|
|
" %cmp = icmp sle i32 %iv.next, %sz "
|
|
|
|
" br i1 %cmp, label %loop.body, label %exit "
|
|
|
|
"exit: "
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *ScevV0 = SE.getSCEV(getInstructionByName(F, "v0")); // %pp
|
|
|
|
auto *ScevV3 = SE.getSCEV(getInstructionByName(F, "v3")); // (3 + %pp)
|
|
|
|
auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
|
|
|
|
auto *ScevXA = SE.getSCEV(getInstructionByName(F, "xa")); // {%pp,+,1}
|
|
|
|
auto *ScevYY = SE.getSCEV(getInstructionByName(F, "yy")); // {(3 + %pp),+,1}
|
|
|
|
auto *ScevXB = SE.getSCEV(getInstructionByName(F, "xb")); // {%pp,+,1}
|
|
|
|
auto *ScevIVNext = SE.getSCEV(getInstructionByName(F, "iv.next")); // {1,+,1}
|
|
|
|
|
|
|
|
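// Helper: return the value of LHS - RHS if SCEV can compute it as a constant,
// or None otherwise.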
auto diff = [&SE](const SCEV *LHS, const SCEV *RHS) -> Optional<int> {
|
|
|
|
auto ConstantDiffOrNone = computeConstantDifference(SE, LHS, RHS);
|
|
|
|
if (!ConstantDiffOrNone)
|
|
|
|
return None;
|
|
|
|
|
|
|
|
auto ExtDiff = ConstantDiffOrNone->getSExtValue();
|
|
|
|
int Diff = ExtDiff;
|
|
|
|
assert(Diff == ExtDiff && "Integer overflow");
|
|
|
|
return Diff;
|
|
|
|
};
|
|
|
|
|
|
|
|
EXPECT_EQ(diff(ScevV3, ScevV0), 3);
|
|
|
|
EXPECT_EQ(diff(ScevV0, ScevV3), -3);
|
|
|
|
EXPECT_EQ(diff(ScevV0, ScevV0), 0);
|
|
|
|
EXPECT_EQ(diff(ScevV3, ScevV3), 0);
|
|
|
|
EXPECT_EQ(diff(ScevIV, ScevIV), 0);
|
|
|
|
EXPECT_EQ(diff(ScevXA, ScevXB), 0);
|
|
|
|
EXPECT_EQ(diff(ScevXA, ScevYY), -3);
|
|
|
|
EXPECT_EQ(diff(ScevYY, ScevXB), 3);
|
|
|
|
EXPECT_EQ(diff(ScevIV, ScevIVNext), -1);
|
|
|
|
EXPECT_EQ(diff(ScevIVNext, ScevIV), 1);
|
|
|
|
EXPECT_EQ(diff(ScevIVNext, ScevIVNext), 0);
|
|
|
|
EXPECT_EQ(diff(ScevV0, ScevIV), None);
|
|
|
|
EXPECT_EQ(diff(ScevIVNext, ScevV3), None);
|
|
|
|
EXPECT_EQ(diff(ScevYY, ScevV3), None);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVrewriteUnknowns) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %i) { "
|
|
|
|
"entry: "
|
|
|
|
" %cmp3 = icmp ult i32 %i, 16 "
|
|
|
|
" br i1 %cmp3, label %loop.body, label %exit "
|
|
|
|
"loop.body: "
|
|
|
|
" %iv = phi i32 [ %iv.next, %loop.body ], [ %i, %entry ] "
|
|
|
|
" %iv.next = add nsw i32 %iv, 1 "
|
|
|
|
" %cmp = icmp eq i32 %iv.next, 16 "
|
|
|
|
" br i1 %cmp, label %exit, label %loop.body "
|
|
|
|
"exit: "
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {%i,+,1}
auto *ScevI = SE.getSCEV(getArgByName(F, "i"));           // %i
|
|
|
|
|
|
|
|
ValueToSCEVMapTy RewriteMap;
|
|
|
|
RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
|
|
|
|
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
|
|
|
|
auto *WithUMin = SCEVParameterRewriter::rewrite(ScevIV, SE, RewriteMap);
|
|
|
|
|
|
|
|
EXPECT_NE(WithUMin, ScevIV);
|
|
|
|
auto *AR = dyn_cast<SCEVAddRecExpr>(WithUMin);
|
|
|
|
EXPECT_TRUE(AR);
|
|
|
|
EXPECT_EQ(AR->getStart(),
|
|
|
|
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17)));
|
|
|
|
EXPECT_EQ(AR->getStepRecurrence(SE),
|
|
|
|
cast<SCEVAddRecExpr>(ScevIV)->getStepRecurrence(SE));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVAddNUW) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString("define void @foo(i32 %x) { "
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *X = SE.getSCEV(getArgByName(F, "x"));
|
|
|
|
auto *One = SE.getOne(X->getType());
|
|
|
|
auto *Sum = SE.getAddExpr(X, One, SCEV::FlagNUW);
|
|
|
|
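// With the NUW flag, %x + 1 cannot wrap, so the sum is known to be both
// unsigned-greater-or-equal and unsigned-greater-than %x.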
EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGE, Sum, X));
|
|
|
|
EXPECT_TRUE(SE.isKnownPredicate(ICmpInst::ICMP_UGT, Sum, X));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVgetRanges) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %i) { "
|
|
|
|
"entry: "
|
|
|
|
" br label %loop.body "
|
|
|
|
"loop.body: "
|
|
|
|
" %iv = phi i32 [ %iv.next, %loop.body ], [ 0, %entry ] "
|
|
|
|
" %iv.next = add nsw i32 %iv, 1 "
|
|
|
|
" %cmp = icmp eq i32 %iv.next, 16 "
|
|
|
|
" br i1 %cmp, label %exit, label %loop.body "
|
|
|
|
"exit: "
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {0,+,1}
|
|
|
|
auto *ScevI = SE.getSCEV(getArgByName(F, "i"));
|
|
|
|
EXPECT_EQ(SE.getUnsignedRange(ScevIV).getLower(), 0);
|
|
|
|
EXPECT_EQ(SE.getUnsignedRange(ScevIV).getUpper(), 16);
|
|
|
|
|
|
|
|
auto *Add = SE.getAddExpr(ScevI, ScevIV);
|
|
|
|
ValueToSCEVMapTy RewriteMap;
|
|
|
|
RewriteMap[cast<SCEVUnknown>(ScevI)->getValue()] =
|
|
|
|
SE.getUMinExpr(ScevI, SE.getConstant(ScevI->getType(), 17));
|
|
|
|
auto *AddWithUMin = SCEVParameterRewriter::rewrite(Add, SE, RewriteMap);
|
|
|
|
EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getLower(), 0);
|
|
|
|
EXPECT_EQ(SE.getUnsignedRange(AddWithUMin).getUpper(), 33);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, SCEVgetExitLimitForGuardedLoop) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %i) { "
|
|
|
|
"entry: "
|
|
|
|
" %cmp3 = icmp ult i32 %i, 16 "
|
|
|
|
" br i1 %cmp3, label %loop.body, label %exit "
|
|
|
|
"loop.body: "
|
|
|
|
" %iv = phi i32 [ %iv.next, %loop.body ], [ %i, %entry ] "
|
|
|
|
" %iv.next = add nsw i32 %iv, 1 "
|
|
|
|
" %cmp = icmp eq i32 %iv.next, 16 "
|
|
|
|
" br i1 %cmp, label %exit, label %loop.body "
|
|
|
|
"exit: "
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *ScevIV = SE.getSCEV(getInstructionByName(F, "iv")); // {%i,+,1}
|
|
|
|
const Loop *L = cast<SCEVAddRecExpr>(ScevIV)->getLoop();
|
|
|
|
|
|
|
|
const SCEV *BTC = SE.getBackedgeTakenCount(L);
|
|
|
|
EXPECT_FALSE(isa<SCEVConstant>(BTC));
|
|
|
|
const SCEV *MaxBTC = SE.getConstantMaxBackedgeTakenCount(L);
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(MaxBTC)->getAPInt(), 15);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, ImpliedViaAddRecStart) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32* %p) { "
|
|
|
|
"entry: "
|
|
|
|
" %x = load i32, i32* %p, !range !0 "
|
|
|
|
" br label %loop "
|
|
|
|
"loop: "
|
|
|
|
" %iv = phi i32 [ %x, %entry], [%iv.next, %backedge] "
|
|
|
|
" %ne.check = icmp ne i32 %iv, 0 "
|
|
|
|
" br i1 %ne.check, label %backedge, label %exit "
|
|
|
|
"backedge: "
|
|
|
|
" %iv.next = add i32 %iv, -1 "
|
|
|
|
" br label %loop "
|
|
|
|
"exit:"
|
|
|
|
" ret void "
|
|
|
|
"} "
|
|
|
|
"!0 = !{i32 0, i32 2147483647}",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *X = SE.getSCEV(getInstructionByName(F, "x"));
|
|
|
|
auto *Context = getInstructionByName(F, "iv.next");
|
|
|
|
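// The %ne.check guard dominates %iv.next, so SCEV should be able to prove at
// that context that the addrec's start value %x is non-zero.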
EXPECT_TRUE(SE.isKnownPredicateAt(ICmpInst::ICMP_NE, X,
|
|
|
|
SE.getZero(X->getType()), Context));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, UnsignedIsImpliedViaOperations) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M =
|
|
|
|
parseAssemblyString("define void @foo(i32* %p1, i32* %p2) { "
|
|
|
|
"entry: "
|
|
|
|
" %x = load i32, i32* %p1, !range !0 "
|
|
|
|
" %cond = icmp ne i32 %x, 0 "
|
|
|
|
" br i1 %cond, label %guarded, label %exit "
|
|
|
|
"guarded: "
|
|
|
|
" %y = add i32 %x, -1 "
|
|
|
|
" ret void "
|
|
|
|
"exit: "
|
|
|
|
" ret void "
|
|
|
|
"} "
|
|
|
|
"!0 = !{i32 0, i32 2147483647}",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *X = SE.getSCEV(getInstructionByName(F, "x"));
|
|
|
|
auto *Y = SE.getSCEV(getInstructionByName(F, "y"));
|
|
|
|
auto *Guarded = getInstructionByName(F, "y")->getParent();
|
|
|
|
ASSERT_TRUE(Guarded);
|
|
|
|
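// On entry to %guarded, %x is known non-zero and within [0, 2^31), so
// %y = %x - 1 must be unsigned-less-than %x.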
EXPECT_TRUE(
|
|
|
|
SE.isBasicBlockEntryGuardedByCond(Guarded, ICmpInst::ICMP_ULT, Y, X));
|
|
|
|
EXPECT_TRUE(
|
|
|
|
SE.isBasicBlockEntryGuardedByCond(Guarded, ICmpInst::ICMP_UGT, X, Y));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, ProveImplicationViaNarrowing) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define i32 @foo(i32 %start, i32* %q) { "
|
|
|
|
"entry: "
|
|
|
|
" %wide.start = zext i32 %start to i64 "
|
|
|
|
" br label %loop "
|
|
|
|
"loop: "
|
|
|
|
" %wide.iv = phi i64 [%wide.start, %entry], [%wide.iv.next, %backedge] "
|
|
|
|
" %iv = phi i32 [%start, %entry], [%iv.next, %backedge] "
|
|
|
|
" %cond = icmp eq i64 %wide.iv, 0 "
|
|
|
|
" br i1 %cond, label %exit, label %backedge "
|
|
|
|
"backedge: "
|
|
|
|
" %iv.next = add i32 %iv, -1 "
|
|
|
|
" %index = zext i32 %iv.next to i64 "
|
|
|
|
" %load.addr = getelementptr i32, i32* %q, i64 %index "
|
|
|
|
" %stop = load i32, i32* %load.addr "
|
|
|
|
" %loop.cond = icmp eq i32 %stop, 0 "
|
|
|
|
" %wide.iv.next = add nsw i64 %wide.iv, -1 "
|
|
|
|
" br i1 %loop.cond, label %loop, label %failure "
|
|
|
|
"exit: "
|
|
|
|
" ret i32 0 "
|
|
|
|
"failure: "
|
|
|
|
" unreachable "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
auto *IV = SE.getSCEV(getInstructionByName(F, "iv"));
|
|
|
|
auto *Zero = SE.getZero(IV->getType());
|
|
|
|
auto *Backedge = getInstructionByName(F, "iv.next")->getParent();
|
|
|
|
ASSERT_TRUE(Backedge);
|
|
|
|
(void)IV;
|
|
|
|
(void)Zero;
|
|
|
|
// FIXME: This can only be proved with the option
// scalar-evolution-use-expensive-range-sharpening turned on, which is
// currently off by default. Enable the check once it is on by default.
|
|
|
|
// EXPECT_TRUE(SE.isBasicBlockEntryGuardedByCond(Backedge,
|
|
|
|
// ICmpInst::ICMP_UGT,
|
|
|
|
// IV, Zero));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, ImpliedCond) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"define void @foo(i32 %len) { "
|
|
|
|
"entry: "
|
|
|
|
" br label %loop "
|
|
|
|
"loop: "
|
|
|
|
" %iv = phi i32 [ 0, %entry], [%iv.next, %loop] "
|
|
|
|
" %iv.next = add nsw i32 %iv, 1 "
|
|
|
|
" %cmp = icmp slt i32 %iv, %len "
|
|
|
|
" br i1 %cmp, label %loop, label %exit "
|
|
|
|
"exit:"
|
|
|
|
" ret void "
|
|
|
|
"}",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
ASSERT_TRUE(M && "Could not parse module?");
|
|
|
|
ASSERT_TRUE(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "foo", [](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
Instruction *IV = getInstructionByName(F, "iv");
|
|
|
|
Type *Ty = IV->getType();
|
|
|
|
const SCEV *Zero = SE.getZero(Ty);
|
|
|
|
const SCEV *MinusOne = SE.getMinusOne(Ty);
|
|
|
|
// {0,+,1}<nuw><nsw>
|
|
|
|
const SCEV *AddRec_0_1 = SE.getSCEV(IV);
|
|
|
|
// {0,+,-1}<nw>
|
|
|
|
const SCEV *AddRec_0_N1 = SE.getNegativeSCEV(AddRec_0_1);
|
|
|
|
|
|
|
|
// {0,+,1}<nuw><nsw> > 0 -> {0,+,-1}<nw> < 0
|
|
|
|
EXPECT_TRUE(isImpliedCond(SE, ICmpInst::ICMP_SLT, AddRec_0_N1, Zero,
|
|
|
|
ICmpInst::ICMP_SGT, AddRec_0_1, Zero));
|
|
|
|
// {0,+,-1}<nw> < -1 -> {0,+,1}<nuw><nsw> > 0
|
|
|
|
EXPECT_TRUE(isImpliedCond(SE, ICmpInst::ICMP_SGT, AddRec_0_1, Zero,
|
|
|
|
ICmpInst::ICMP_SLT, AddRec_0_N1, MinusOne));
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(ScalarEvolutionsTest, MatchURem) {
|
|
|
|
LLVMContext C;
|
|
|
|
SMDiagnostic Err;
|
|
|
|
std::unique_ptr<Module> M = parseAssemblyString(
|
|
|
|
"target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
|
|
|
|
" "
|
|
|
|
"define void @test(i32 %a, i32 %b, i16 %c, i64 %d) {"
|
|
|
|
"entry: "
|
|
|
|
" %rem1 = urem i32 %a, 2"
|
|
|
|
" %rem2 = urem i32 %a, 5"
|
|
|
|
" %rem3 = urem i32 %a, %b"
|
|
|
|
" %c.ext = zext i16 %c to i32"
|
|
|
|
" %rem4 = urem i32 %c.ext, 2"
|
|
|
|
" %ext = zext i32 %rem4 to i64"
|
|
|
|
" %rem5 = urem i64 %d, 17179869184"
|
|
|
|
" ret void "
|
|
|
|
"} ",
|
|
|
|
Err, C);
|
|
|
|
|
|
|
|
assert(M && "Could not parse module?");
|
|
|
|
assert(!verifyModule(*M) && "Must have been well formed!");
|
|
|
|
|
|
|
|
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
|
|
|
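// Each of these urem instructions should be matched back to its original
// operands, with types preserved.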
for (auto *N : {"rem1", "rem2", "rem3", "rem5"}) {
|
|
|
|
auto *URemI = getInstructionByName(F, N);
|
|
|
|
auto *S = SE.getSCEV(URemI);
|
|
|
|
const SCEV *LHS, *RHS;
|
|
|
|
EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
|
|
|
|
EXPECT_EQ(LHS, SE.getSCEV(URemI->getOperand(0)));
|
|
|
|
EXPECT_EQ(RHS, SE.getSCEV(URemI->getOperand(1)));
|
|
|
|
EXPECT_EQ(LHS->getType(), S->getType());
|
|
|
|
EXPECT_EQ(RHS->getType(), S->getType());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the case where the urem operand is zero-extended. Make sure the
|
|
|
|
// match results are extended to the size of the input expression.
|
|
|
|
auto *Ext = getInstructionByName(F, "ext");
|
|
|
|
auto *URem1 = getInstructionByName(F, "rem4");
|
|
|
|
auto *S = SE.getSCEV(Ext);
|
|
|
|
const SCEV *LHS, *RHS;
|
|
|
|
EXPECT_TRUE(matchURem(SE, S, LHS, RHS));
|
|
|
|
EXPECT_NE(LHS, SE.getSCEV(URem1->getOperand(0)));
|
|
|
|
// RHS and URem1->getOperand(1) have different widths, so compare the
|
|
|
|
// integer values.
|
|
|
|
EXPECT_EQ(cast<SCEVConstant>(RHS)->getValue()->getZExtValue(),
|
|
|
|
cast<SCEVConstant>(SE.getSCEV(URem1->getOperand(1)))
|
|
|
|
->getValue()
|
|
|
|
->getZExtValue());
|
|
|
|
EXPECT_EQ(LHS->getType(), S->getType());
|
|
|
|
EXPECT_EQ(RHS->getType(), S->getType());
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
} // end namespace llvm
|