2016-03-01 19:46:54 +01:00
|
|
|
//===- MemorySSA.cpp - Unit tests for MemorySSA ---------------------------===//
|
|
|
|
//
|
2019-01-19 09:50:56 +01:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2016-03-01 19:46:54 +01:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2017-06-06 13:06:56 +02:00
|
|
|
#include "llvm/Analysis/MemorySSA.h"
|
2016-03-01 19:46:54 +01:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2017-04-11 22:06:36 +02:00
|
|
|
#include "llvm/Analysis/MemorySSAUpdater.h"
|
2016-03-01 19:46:54 +01:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2016-06-21 20:39:20 +02:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2016-03-01 19:46:54 +01:00
|
|
|
#include "llvm/IR/Dominators.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "gtest/gtest.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2016-04-29 20:42:55 +02:00
|
|
|
const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
|
|
|
|
|
|
|
|
/// There's a lot of common setup between these tests. This fixture helps reduce
|
|
|
|
/// that. Tests should mock up a function, store it in F, and then call
|
|
|
|
/// setupAnalyses().
|
|
|
|
/// Test fixture for MemorySSA unit tests.
///
/// Owns the LLVMContext, Module, IRBuilder, and TLI needed to mock up a
/// function, plus a lazily-built bundle of analyses (TestAnalyses) that a
/// test constructs via setupAnalyses() after populating F.
class MemorySSATest : public testing::Test {
protected:
  // N.B. Many of these members depend on each other (e.g. the Module depends on
  // the Context, etc.). So, order matters here (and in TestAnalyses).
  LLVMContext C;
  Module M;
  IRBuilder<> B;
  DataLayout DL;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;
  // The function under test; each TEST_F creates it and stores it here
  // before calling setupAnalyses().
  Function *F;

  // Things that we need to build after the function is created.
  struct TestAnalyses {
    DominatorTree DT;
    AssumptionCache AC;
    AAResults AA;
    BasicAAResult BAA;
    // We need to defer MSSA construction until AA is *entirely* set up, which
    // requires calling addAAResult. Hence, we just use a pointer here.
    std::unique_ptr<MemorySSA> MSSA;
    // Walker owned by MSSA; cached here for convenience.
    MemorySSAWalker *Walker;

    // Builds DT/AC/AA/BAA from the fixture's function, registers BAA with
    // the aggregate AA, and only then constructs MemorySSA (see note above).
    TestAnalyses(MemorySSATest &Test)
        : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
          BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
      AA.addAAResult(BAA);
      MSSA = std::make_unique<MemorySSA>(*Test.F, &AA, &DT);
      Walker = MSSA->getWalker();
    }
  };

  std::unique_ptr<TestAnalyses> Analyses;

  // Must be called after F has been created and its IR fully emitted;
  // (re)builds all analyses, including MemorySSA, over the current F.
  void setupAnalyses() {
    assert(F);
    Analyses.reset(new TestAnalyses(*this));
  }

public:
  MemorySSATest()
      : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
};
|
2016-03-01 19:46:54 +01:00
|
|
|
|
2016-09-26 19:44:31 +02:00
|
|
|
TEST_F(MemorySSATest, CreateALoad) {
  // We create a diamond where there is a store on one side, and then after
  // building MemorySSA, create a load after the merge point, and use it to test
  // updating by creating an access for the load.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  // Store only on the left side of the diamond, so Merge needs a MemoryPhi.
  B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the load after MemorySSA has been built.
  B.SetInsertPoint(Merge);
  // Renamed from `LoadInst` to `LI`: the old name shadowed the llvm::LoadInst
  // type for the rest of this scope.
  LoadInst *LI = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Create the load memory access, explicitly defined by the phi.
  MemoryUse *LoadAccess = cast<MemoryUse>(
      Updater.createMemoryAccessInBB(LI, MP, Merge, MemorySSA::Beginning));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 02:23:13 +01:00
|
|
|
TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
  // We create a diamond, then build memoryssa with no memory accesses, and
  // incrementally update it by inserting a store in the entry, a load in the
  // merge point, then a store in the branch, another load in the merge point,
  // and then a store in the entry.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Build MemorySSA over the (memory-access-free) CFG first; everything
  // below exercises the incremental-update API.
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Add the store
  B.SetInsertPoint(Entry, Entry->begin());
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
      EntryStore, nullptr, Entry, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // MemoryPHI should not already exist.
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_EQ(MP, nullptr);

  // Create the load memory access
  MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      FirstLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(FirstLoadAccess);
  // Should just have a load using the entry access, because it should discover
  // the phi is trivial
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);

  // Create a store on the left
  // Add the store
  B.SetInsertPoint(Left, Left->begin());
  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
      LeftStore, nullptr, Left, MemorySSA::Beginning);
  // RenameUses == false: downstream uses are checked/fixed up below.
  Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);

  // MemoryPHI should exist after adding LeftStore.
  MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the second load
  B.SetInsertPoint(Merge, Merge->begin());
  LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access
  MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      SecondLoad, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(SecondLoadAccess);
  // Now the load should be a phi of the entry store and the left store
  MemoryPhi *MergePhi =
      dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  // Now create a store below the existing one in the entry
  B.SetInsertPoint(Entry, --Entry->end());
  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
      SecondEntryStore, nullptr, Entry, MemorySSA::End);
  // Insert it twice just to test renaming
  // First with RenameUses == false: the first load is NOT rewired...
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
  EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
  // ...then with RenameUses == true: now it is.
  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
  // and make sure the phi below it got updated, despite being blocks away
  MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
  EXPECT_NE(MergePhi, nullptr);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
|
|
|
TEST_F(MemorySSATest, CreateALoadUpdater) {
  // We create a diamond, then build memoryssa with no memory accesses, and
  // incrementally update it by inserting a store in one of the branches, and a
  // load in the merge point
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Build MemorySSA over the empty diamond, then update it incrementally.
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  B.SetInsertPoint(Left, Left->begin());
  // Add the store
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  MemoryAccess *StoreAccess =
      Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
  Updater.insertDef(cast<MemoryDef>(StoreAccess));

  // MemoryPHI should be created when inserting the def
  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
  EXPECT_NE(MP, nullptr);

  // Add the load
  B.SetInsertPoint(Merge, Merge->begin());
  // Renamed from `LoadInst` to `LI`: the old name shadowed the llvm::LoadInst
  // type for the rest of this scope.
  LoadInst *LI = B.CreateLoad(B.getInt8Ty(), PointerArg);

  // Create the load memory access; insertUse finds its defining access.
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LI, nullptr, Merge, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  MSSA.verifyMemorySSA();
}
|
2016-06-21 20:39:20 +02:00
|
|
|
|
2017-06-07 18:46:53 +02:00
|
|
|
TEST_F(MemorySSATest, SinkLoad) {
  // Build a diamond CFG with a load in the left block and a store at the
  // start of the merge block, then mimic sinking the load into the merge
  // block through the MemorySSAUpdater API.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left, Left->begin());
  Argument *PointerArg = &*F->arg_begin();
  B.SetInsertPoint(Left);
  B.CreateBr(Merge);
  B.SetInsertPoint(Right);
  B.CreateBr(Merge);

  // Load in left block.
  B.SetInsertPoint(Left, Left->begin());
  LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
  // Store in merge block.
  B.SetInsertPoint(Merge, Merge->begin());
  B.CreateStore(B.getInt8(16), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Mimic sinking of a load:
  // - clone the load
  // - insert the clone in the "exit" block
  // - insert the clone in MemorySSA
  // - remove the original from its block
  LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
  Merge->getInstList().insert(Merge->begin(), LoadInstClone);
  MemoryAccess *NewLoadAccess = Updater.createMemoryAccessInBB(
      LoadInstClone, nullptr, LoadInstClone->getParent(), MemorySSA::Beginning);
  Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
  MSSA.verifyMemorySSA();
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
2016-07-31 23:08:20 +02:00
|
|
|
TEST_F(MemorySSATest, MoveAStore) {
  // Build a diamond with a store in the entry, a store on one side, and a
  // load at the merge point. After building MemorySSA, test updating by
  // moving the side store up into the entry block: a fresh access is created
  // there and the old one is destroyed.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store instruction, then patch MemorySSA by hand: create a new
  // access right after the entry store, retarget all uses of the entry store
  // to it, and drop the now-stale side access.
  SideStore->moveBefore(Entry->getTerminator());
  MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 02:23:13 +01:00
|
|
|
TEST_F(MemorySSATest, MoveAStoreUpdater) {
  // Build a diamond with a store in the entry, a store on one side, and a
  // load at the merge point. After building MemorySSA, test updating by
  // moving the side store into the entry block using the updater's
  // create/remove/insertDef API. This destroys the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Move the store and create its replacement access in the entry block.
  SideStore->moveBefore(Entry->getTerminator());
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  auto *NewStoreAccess = Updater.createMemoryAccessAfter(
      SideStore, EntryStoreAccess, EntryStoreAccess);
  // Before the update, the load points to a phi of EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  Updater.removeMemoryAccess(SideStoreAccess);
  Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
  // Afterwards, both incoming phi values are the new store access.
  EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
|
|
|
TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
  // Build a diamond with a store in the entry, a store on one side, and a
  // load at the merge point. After building MemorySSA, test updating by
  // moving the side store to the entry block with moveAfter(). This does not
  // destroy the old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Grab the accesses involved in the move.
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before the move, the load points to a phi of EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
  Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
  // Afterwards, both incoming phi values are the side store access.
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);

  MSSA.verifyMemorySSA();
}
|
|
|
|
|
2017-01-30 12:35:39 +01:00
|
|
|
TEST_F(MemorySSATest, MoveAStoreAllAround) {
  // Build a diamond with a store in the entry, a store on one side, and a
  // load at the merge point. After building MemorySSA, test updating by
  // moving the side store to the entry block, then into the other side
  // block, then to just before the load. None of these moves destroy the
  // old access.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  auto *MergeLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Grab the accesses involved in the moves.
  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
  // Before any move, the load points to a phi of EntryStore and SideStore.
  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
  // Move the side store to just before the entry store.
  SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
  Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
  // Now the phi is fed by the entry store on both edges.
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
  // Next, move the store into the right branch.
  SideStore->moveBefore(*Right, Right->begin());
  Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
  MSSA.verifyMemorySSA();
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
  // Finally, move it to just before the load.
  SideStore->moveBefore(MergeLoad);
  Updater.moveBefore(SideStoreAccess, LoadAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
2016-06-21 20:39:20 +02:00
|
|
|
TEST_F(MemorySSATest, RemoveAPhi) {
  // Build a diamond with a store on one side and a load after the merge
  // point, which lets us exercise several different removal cases: removing
  // a def, draining a phi to live-on-entry, and removing the phi itself.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LI = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);

  // Before removal, the load is a use of a phi<store, liveOnEntry>.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LI));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(SI));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // Kill the store.
  Updater.removeMemoryAccess(StoreAccess);
  MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
  // The phi should now be <liveOnEntry, liveOnEntry>.
  for (auto &Op : MP->incoming_values())
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
  // Redirect every use of the phi to the live-on-entry def...
  MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
  // ...so the load is now defined by liveOnEntry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
  // Remove the now-dead phi.
  Updater.removeMemoryAccess(MP);
  MSSA.verifyMemorySSA();
}
|
|
|
|
|
2016-04-29 20:42:55 +02:00
|
|
|
TEST_F(MemorySSATest, RemoveMemoryAccess) {
  // Build a diamond with a store on one side and a load after the merge
  // point. This enables us to test a bunch of different removal cases and
  // how the walker reacts to them.
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);
  BasicBlock *Entry(BasicBlock::Create(C, "", F));
  BasicBlock *Left(BasicBlock::Create(C, "", F));
  BasicBlock *Right(BasicBlock::Create(C, "", F));
  BasicBlock *Merge(BasicBlock::Create(C, "", F));
  B.SetInsertPoint(Entry);
  B.CreateCondBr(B.getTrue(), Left, Right);
  B.SetInsertPoint(Left);
  Argument *PointerArg = &*F->arg_begin();
  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
  BranchInst::Create(Merge, Left);
  BranchInst::Create(Merge, Right);
  B.SetInsertPoint(Merge);
  LoadInst *LI = B.CreateLoad(B.getInt8Ty(), PointerArg);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Before removal, the load is a use of a phi<store, liveOnEntry>. It should
  // stay that way afterwards.
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LI));
  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(SI));
  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
  // The load is currently clobbered by one of the phi arguments, so the
  // walker should report the phi as the clobbering access.
  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LI));
  Updater.removeMemoryAccess(StoreAccess);
  MSSA.verifyMemorySSA();
  // After the removal, check that we got the right accesses.
  // The load should still point to the phi ...
  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
  // ... but we should now get live on entry for the clobbering definition of
  // the load, since the walk goes past the phi node once every argument is
  // the same.
  // XXX: This currently requires either removing the phi or resetting
  // optimized on the load.
  EXPECT_FALSE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LI)));
  // If we reset optimized, we get live on entry.
  LoadAccess->resetOptimized();
  EXPECT_TRUE(
      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LI)));
  // The phi should now be a two entry phi with two live-on-entry defs.
  for (const auto &Op : DefiningAccess->operands()) {
    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
    EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
  }

  // Now try to remove the single-valued phi.
  Updater.removeMemoryAccess(DefiningAccess);
  MSSA.verifyMemorySSA();
  // The load should now be a load of live on entry.
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
}
|
|
|
|
|
|
|
|
// We had a bug with caching where the walker would report MemoryDef#3's clobber
|
|
|
|
// (below) was MemoryDef#1.
|
|
|
|
//
|
|
|
|
// define void @F(i8*) {
|
|
|
|
// %A = alloca i8, i8 1
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// store i8 0, i8* %A
|
|
|
|
// ; 2 = MemoryDef(1)
|
|
|
|
// store i8 1, i8* %A
|
|
|
|
// ; 3 = MemoryDef(2)
|
|
|
|
// store i8 2, i8* %A
|
|
|
|
// }
|
|
|
|
TEST_F(MemorySSATest, TestTripleStore) {
  // Regression test: three consecutive stores to the same alloca; each one's
  // walker clobber must be exactly its defining access (caching once made the
  // third store's clobber the first store).
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {S1, S2, S3}) {
    // Everything should be clobbered by its defining access.
    MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
    MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
    EXPECT_EQ(DefiningAccess, WalkerClobber)
        << "Store " << I << " doesn't have the correct clobbering access";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}
|
|
|
|
|
|
|
|
// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
|
|
|
|
// walker was caching the initial node it walked. This was fine (albeit
|
|
|
|
// mostly redundant) unless the initial node being walked is a clobber for the
|
|
|
|
// query. In that case, we'd cache that the node clobbered itself.
|
|
|
|
TEST_F(MemorySSATest, TestStoreAndLoad) {
  // Regression test: the walker once cached the initial node it walked, so a
  // load whose initial node *is* its clobber would be cached as clobbering
  // itself. Check that a store clobbers the following load, and that the
  // store itself is clobbered by live-on-entry.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
  Instruction *LI = B.CreateLoad(Int8, Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
}
|
|
|
|
|
|
|
|
// Another bug (related to the above two fixes): It was noted that, given the
|
|
|
|
// following code:
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// store i8 0, i8* %1
|
|
|
|
//
|
|
|
|
// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
|
|
|
|
// hand back the store (correctly). A later call to
|
|
|
|
// getClobberingMemoryAccess(const Instruction*) would also hand back the store
|
|
|
|
// (incorrectly; it should return liveOnEntry).
|
|
|
|
//
|
|
|
|
// This test checks that repeated calls to either function returns what they're
|
|
|
|
// meant to.
|
|
|
|
TEST_F(MemorySSATest, TestStoreDoubleQuery) {
  // A location-based query for a store's clobber and an instruction-based
  // query must not pollute each other's cached results (see the comment block
  // above this test for the original bug).
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  MemoryAccess *Clobber =
      Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);

  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));

  // Try again (with entries in the cache already) for good measure...
  Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
  LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
  EXPECT_EQ(Clobber, StoreAccess);
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
}
|
2016-08-03 03:22:19 +02:00
|
|
|
|
|
|
|
// Bug: During phi optimization, the walker wouldn't cache to the proper result
|
|
|
|
// in the farthest-walked BB.
|
|
|
|
//
|
|
|
|
// Specifically, it would assume that whatever we walked to was a clobber.
|
|
|
|
// "Whatever we walked to" isn't a clobber if we hit a cache entry.
|
|
|
|
//
|
|
|
|
// ...So, we need a test case that looks like:
|
|
|
|
// A
|
|
|
|
// / \
|
|
|
|
// B |
|
|
|
|
// \ /
|
|
|
|
// C
|
|
|
|
//
|
|
|
|
// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
|
|
|
|
// The walk must determine that the blocker exists by using cache entries *while
|
|
|
|
// walking* 'B'.
|
|
|
|
TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
  // Build the diamond-ish CFG described in the comment above (A branches to B
  // or falls through; both reach C) so that a walk from C must consult cache
  // entries created while walking B.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Constant *Zero = ConstantInt::get(Int8, 0);
  Value *AllocA = B.CreateAlloca(Int8, One, "a");
  Value *AllocB = B.CreateAlloca(Int8, One, "b");
  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);

  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);

  B.SetInsertPoint(IfThen);
  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
  B.CreateStore(Zero, AllocB);
  Instruction *ALoad0 = B.CreateLoad(Int8, AllocA, "");
  Instruction *BStore = B.CreateStore(Zero, AllocB);
  // Due to use optimization/etc. we make a store to A, which is removed after
  // we build MSSA. This helps keep the test case simple-ish.
  Instruction *KillStore = B.CreateStore(Zero, AllocA);
  Instruction *ALoad = B.CreateLoad(Int8, AllocA, "");
  B.CreateBr(IfEnd);

  B.SetInsertPoint(IfEnd);
  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  // Kill `KillStore`; it exists solely so that the load after it won't be
  // optimized to FirstStore.
  Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
  KillStore->eraseFromParent();
  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));

  // Populate the cache for the store to AllocB directly after FirstStore. It
  // should point to something in block B (so something in D can't be optimized
  // to it).
  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);

  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
  // It will point to the store to %b after FirstStore. This only happens during
  // phi optimization.
  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
  EXPECT_EQ(BottomClobber, Phi);

  // This query will first check the cache for {%a, BStore}. It should point to
  // FirstStore, not to the store after FirstStore.
  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
}
|
2016-08-03 21:57:02 +02:00
|
|
|
|
|
|
|
// Test that our walker properly handles loads with the invariant group
|
|
|
|
// attribute. It's a bit hacky, since we add the invariant attribute *after*
|
|
|
|
// building MSSA. Otherwise, the use optimizer will optimize it for us, which
|
|
|
|
// isn't what we want.
|
|
|
|
// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
|
|
|
|
TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
  // An !invariant.load is clobbered by nothing inside the function, so the
  // walker must report liveOnEntry for it even though a store to the same
  // location precedes it. The metadata is attached *after* MSSA construction
  // so the use optimizer doesn't pre-resolve it (see comment above the test).
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Constant *One = ConstantInt::get(Int8, 1);
  Value *AllocA = B.CreateAlloca(Int8, One, "");

  Instruction *Store = B.CreateStore(One, AllocA);
  Instruction *Load = B.CreateLoad(Int8, AllocA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);

  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
  // flexible to future changes.
  Walker->invalidateInfo(LoadMA);
  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
}
|
2016-10-04 01:12:35 +02:00
|
|
|
|
2016-10-20 22:13:45 +02:00
|
|
|
// Test loads get reoptimized properly by the walker.
|
|
|
|
TEST_F(MemorySSATest, WalkerReopt) {
  // After removing a load's access and re-creating it with a deliberately
  // wrong (unoptimized) defining access, a walker query must re-optimize it
  // back to the true clobber.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
  Instruction *LIA = B.CreateLoad(Int8, AllocaA);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;
  MemorySSAUpdater Updater(&MSSA);

  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
  Updater.removeMemoryAccess(LoadAccess);

  // Create the load memory access pointing to an unoptimized place.
  MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
  // This should cause it to be optimized.
  EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
  EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
}
|
2016-12-26 00:34:07 +01:00
|
|
|
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 02:23:13 +01:00
|
|
|
// Test out MemorySSAUpdater::moveBefore
|
|
|
|
TEST_F(MemorySSATest, MoveAboveMemoryDef) {
  // Exercise MemorySSAUpdater::moveBefore: hoist StoreC above StoreB and
  // verify the def chain, the walker results, and local block numbering all
  // reflect the new order.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));

  Type *Int8 = Type::getInt8Ty(C);
  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");

  StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
  LoadInst *LoadB = B.CreateLoad(Int8, B_);
  StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
  LoadInst *LoadC = B.CreateLoad(Int8, C);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker &Walker = *Analyses->Walker;

  MemorySSAUpdater Updater(&MSSA);
  // Move the IR instruction first, then tell the updater about it.
  StoreC->moveBefore(StoreB);
  Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
                     cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));

  MSSA.verifyMemorySSA();

  EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreC));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA0));
  EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
            MSSA.getMemoryAccess(StoreA1));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
            MSSA.getMemoryAccess(StoreB));
  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
            MSSA.getMemoryAccess(StoreC));

  // exercise block numbering
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
                                    MSSA.getMemoryAccess(StoreB)));
  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
                                    MSSA.getMemoryAccess(StoreA2)));
}
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 02:23:13 +01:00
|
|
|
|
|
|
|
TEST_F(MemorySSATest, Irreducible) {
  // Create the equivalent of
  // x = something
  // if (...)
  //    goto second_loop_entry
  // while (...) {
  // second_loop_entry:
  // }
  // use(x)
  //
  // i.e. an irreducible loop (two entries into the loop body), then insert a
  // use after the loop via the updater and verify MSSA stays consistent.

  SmallVector<PHINode *, 8> Inserted;
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
  BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
  BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
  BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
  B.SetInsertPoint(IfBB);
  B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
  B.SetInsertPoint(LoopStartBB);
  B.CreateBr(LoopMainBB);
  B.SetInsertPoint(LoopMainBB);
  B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
  B.SetInsertPoint(AfterLoopBB);
  Argument *FirstArg = &*F->arg_begin();
  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAUpdater Updater(&MSSA);
  // Create the load memory access
  LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), FirstArg);
  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
      LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
  Updater.insertUse(LoadAccess);
  MSSA.verifyMemorySSA();
}
|
2018-02-24 00:07:18 +01:00
|
|
|
|
|
|
|
TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
  // Create:
  //   %1 = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %1
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %1
  //
  // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
  // `2` after `1` is removed.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *A = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
  StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
  auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));

  // Prime the walker cache: B's clobber is A.
  MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
  ASSERT_EQ(DefA, BClobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
  StoreA->eraseFromParent();

  EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());

  // The stale cached clobber (the freed DefA) must not be returned.
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
            MSSA.getLiveOnEntryDef())
      << "(DefA = " << DefA << ")";
}
|
2018-02-27 08:20:49 +01:00
|
|
|
|
|
|
|
TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
  // Create:
  //   %x = alloca i8
  //   %y = alloca i8
  //   ; 1 = MemoryDef(liveOnEntry)
  //   store i8 0, i8* %x
  //   ; 2 = MemoryDef(1)
  //   store i8 0, i8* %y
  //   ; 3 = MemoryDef(2)
  //   store i8 0, i8* %x
  //
  // And be sure that MSSA's caching handles the removal of def `1`
  // appropriately.
  IRBuilder<> B(C);
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
  B.SetInsertPoint(Entry);

  Value *X = B.CreateAlloca(B.getInt8Ty());
  Value *Y = B.CreateAlloca(B.getInt8Ty());
  StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
  StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
  StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);

  setupAnalyses();

  MemorySSA &MSSA = *Analyses->MSSA;

  auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
  auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
  auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));

  // Prime the walker cache: X2's clobber walks past Y to X1.
  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
  ASSERT_EQ(DefX1, X2Clobber);

  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
  StoreX1->eraseFromParent();

  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
  // The stale cached clobber (the freed DefX1) must not be returned.
  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
            MSSA.getLiveOnEntryDef())
      << "(DefX1 = " << DefX1 << ")";
}
|
2018-03-08 19:03:14 +01:00
|
|
|
|
|
|
|
// Test Must alias for optimized uses
|
|
|
|
TEST_F(MemorySSATest, TestLoadMustAlias) {
  // Loads clobbered only by liveOnEntry get no alias kind (None); loads
  // optimized to a store of the exact same location get MustAlias.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");

  B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  // Check load from LOE
  LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");
  B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  // Check load from store/def
  LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
  // Check load alias cached for second load
  LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  unsigned I = 0;
  for (LoadInst *V : {LA1, LA2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA3, LA4}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}
|
|
|
|
|
|
|
|
// Test Must alias for optimized defs.
|
|
|
|
TEST_F(MemorySSATest, TestStoreMustAlias) {
  // Stores start unoptimized (MayAlias); after a walker query, stores whose
  // clobber is liveOnEntry get None, and stores clobbered by a store to the
  // exact same alloca get MustAlias.
  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
  StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
        << "Store " << I
        << " has correct alias information before being optimized?";
    if (V == SA1)
      Walker->getClobberingMemoryAccess(V);
    else {
      MemoryAccess *Def = MemDef->getDefiningAccess();
      MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
      EXPECT_NE(Def, Clob) << "Store " << I
                           << " has Defining Access equal to Clobbering Access";
    }
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 0 || I == 1)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}
|
|
|
|
|
|
|
|
// Test May alias for optimized uses.
|
|
|
|
TEST_F(MemorySSATest, TestLoadMayAlias) {
  // Loads optimized to a store of a possibly-overlapping argument location
  // get MayAlias; loads optimized to a store of the identical pointer get
  // MustAlias.
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
  B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
  LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  unsigned I = 0;
  for (LoadInst *V : {LA1, LB1}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
  for (LoadInst *V : {LA2, LB2}) {
    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
        << "Load " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}
|
|
|
|
|
|
|
|
// Test May alias for optimized defs.
|
|
|
|
TEST_F(MemorySSATest, TestStoreMayAlias) {
  // Mixed MayAlias/MustAlias/None results for optimized defs over two
  // argument pointers (which may alias each other) and a local alloca (which
  // aliases neither argument).
  F = Function::Create(FunctionType::get(B.getVoidTy(),
                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
                                         false),
                       GlobalValue::ExternalLinkage, "F", &M);
  B.SetInsertPoint(BasicBlock::Create(C, "", F));
  Type *Int8 = Type::getInt8Ty(C);
  auto *ArgIt = F->arg_begin();
  Argument *PointerA = &*ArgIt;
  Argument *PointerB = &*(++ArgIt);
  Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
  // Store into arg1, must alias because it's LOE => must
  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
  // Store into alloca, no alias with args, so must alias LOE => must
  StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
  // Store into arg1, may alias store to arg2 => may
  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
  // Store into arg2, may alias store to arg1 => may
  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
  // Store into alloca, no alias with args, so must alias SC1 => must
  StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
  // Store into arg2, must alias store to arg2 => must
  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
  std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = Analyses->Walker;

  unsigned I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), false)
        << "Store " << I << " is optimized from the start?";
    EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
        << "Store " << I
        << " has correct alias information before being optimized?";
    ++I;
  }

  for (StoreInst *V : Sts)
    Walker->getClobberingMemoryAccess(V);

  I = 0;
  for (StoreInst *V : Sts) {
    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
    EXPECT_EQ(MemDef->isOptimized(), true)
        << "Store " << I << " was not optimized";
    if (I == 1 || I == 3 || I == 4)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
          << "Store " << I << " doesn't have the correct alias information";
    else if (I == 0 || I == 2)
      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
          << "Store " << I << " doesn't have the correct alias information";
    else
      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
          << "Store " << I << " doesn't have the correct alias information";
    // EXPECT_EQ expands such that if we increment I above, it won't get
    // incremented except when we try to print the error message.
    ++I;
  }
}
|
2018-08-10 07:14:43 +02:00
|
|
|
|
|
|
|
TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
  // Example code (matches the IR built below):
  // define void @a(i8* %foo) {
  //   %bar = getelementptr i8, i8* %foo, i64 1
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   call void @llvm.lifetime.end.p0i8(i64 2, i8* %foo)
  //   call void @llvm.lifetime.start.p0i8(i64 2, i8* %foo)
  //   store i8 0, i8* %foo
  //   store i8 0, i8* %bar
  //   ret void
  // }
  //
  // Patterns like this are possible after inlining; the stores to %foo and %bar
  // should both be clobbered by the lifetime.start call if they're dominated by
  // it.

  IRBuilder<> B(C);
  // void F(i8* %foo)
  F = Function::Create(
      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
      GlobalValue::ExternalLinkage, "F", &M);

  // Make blocks
  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);

  B.SetInsertPoint(Entry);
  Value *Foo = &*F->arg_begin();

  // %bar points one byte past %foo, so the two stores below hit adjacent,
  // distinct locations that both fall inside the lifetime-marked region.
  Value *Bar = B.CreateGEP(B.getInt8Ty(), Foo, B.getInt64(1), "bar");

  // Stores before the lifetime markers; these should NOT be the clobbers of
  // the post-marker stores.
  B.CreateStore(B.getInt8(0), Foo);
  B.CreateStore(B.getInt8(0), Bar);

  // Declare the lifetime intrinsic overloaded on %foo's pointer type.
  auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
    return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
  };

  // End then restart the lifetime of the 2-byte region starting at %foo.
  B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
               {B.getInt64(2), Foo});
  Instruction *LifetimeStart = B.CreateCall(
      GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(2), Foo});

  // Stores after lifetime.start; both should be clobbered by it.
  Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
  Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;

  MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
  ASSERT_NE(LifetimeStartAccess, nullptr);

  MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
  ASSERT_NE(FooAccess, nullptr);

  MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
  ASSERT_NE(BarAccess, nullptr);

  // The walker must not look past the lifetime.start: it is the clobber for
  // both trailing stores, even though earlier stores wrote the same bytes.
  MemoryAccess *FooClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
  EXPECT_EQ(FooClobber, LifetimeStartAccess);

  MemoryAccess *BarClobber =
      MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
  EXPECT_EQ(BarClobber, LifetimeStartAccess);
}
|
2018-08-23 23:29:11 +02:00
|
|
|
|
|
|
|
TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
  IRBuilder<> B(C);
  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
                       GlobalValue::ExternalLinkage, "F", &M);

  // Build a diamond CFG:
  //     entry
  //      / \
  //     a   b
  //      \ /
  //       c
  //
  // A def is placed in both 'a' and 'b'. After moving the def from 'a' into
  // 'b', its cached optimization must be invalidated.
  BasicBlock *EntryBB = BasicBlock::Create(C, "entry", F);
  BasicBlock *LeftBB = BasicBlock::Create(C, "a", F);
  BasicBlock *RightBB = BasicBlock::Create(C, "b", F);
  BasicBlock *JoinBB = BasicBlock::Create(C, "c", F);

  Type *Int8 = Type::getInt8Ty(C);
  B.SetInsertPoint(EntryBB);
  Value *Slot = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
  StoreInst *EntryStore = B.CreateStore(B.getInt8(0), Slot);
  B.CreateCondBr(B.getTrue(), LeftBB, RightBB);

  B.SetInsertPoint(LeftBB);
  StoreInst *LeftStore = B.CreateStore(B.getInt8(1), Slot);
  B.CreateBr(JoinBB);

  B.SetInsertPoint(RightBB);
  StoreInst *RightStore = B.CreateStore(B.getInt8(2), Slot);
  B.CreateBr(JoinBB);

  B.SetInsertPoint(JoinBB);
  B.CreateUnreachable();

  setupAnalyses();
  MemorySSA &MSSA = *Analyses->MSSA;
  MemorySSAWalker *Walker = MSSA.getWalker();

  auto *EntryDef = cast<MemoryDef>(MSSA.getMemoryAccess(EntryStore));
  auto *LeftDef = cast<MemoryDef>(MSSA.getMemoryAccess(LeftStore));
  auto *RightDef = cast<MemoryDef>(MSSA.getMemoryAccess(RightStore));

  // Both branch defs are clobbered by the entry store; querying the walker
  // also caches that result on each def.
  ASSERT_EQ(Walker->getClobberingMemoryAccess(LeftDef), EntryDef);
  ASSERT_TRUE(LeftDef->isOptimized());

  ASSERT_EQ(Walker->getClobberingMemoryAccess(RightDef), EntryDef);
  ASSERT_TRUE(RightDef->isOptimized());

  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
  // to invalidate the cache for RightDef. If the user wants to actually do
  // moves like these, it's up to them to ensure that nearby cache entries are
  // correctly invalidated (which, in general, requires walking all
  // instructions that the moved instruction dominates). So we probably
  // shouldn't be doing moves like this in general. Still, works as a
  // test-case. ;)
  MemorySSAUpdater(&MSSA).moveToPlace(LeftDef, RightBB,
                                      MemorySSA::InsertionPlace::End);
  // The move must drop the stale cached optimization; re-querying finds the
  // def now immediately above it in 'b'.
  ASSERT_FALSE(LeftDef->isOptimized());
  ASSERT_EQ(Walker->getClobberingMemoryAccess(LeftDef), RightDef);
}
|
|
|
|
|
|
|
|
TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
|
|
|
|
F = Function::Create(FunctionType::get(B.getVoidTy(),
|
|
|
|
{B.getInt8PtrTy(), B.getInt8PtrTy()},
|
|
|
|
false),
|
|
|
|
GlobalValue::ExternalLinkage, "F", &M);
|
|
|
|
B.SetInsertPoint(BasicBlock::Create(C, "", F));
|
|
|
|
Type *Int8 = Type::getInt8Ty(C);
|
|
|
|
Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
|
|
|
|
Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
|
|
|
|
|
|
|
|
StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
|
|
|
|
StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
|
|
|
|
StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
|
|
|
|
|
|
|
|
setupAnalyses();
|
|
|
|
MemorySSA &MSSA = *Analyses->MSSA;
|
|
|
|
MemorySSAWalker *Walker = Analyses->Walker;
|
|
|
|
|
|
|
|
// If these don't hold, there's no chance of the test result being useful.
|
|
|
|
ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
|
|
|
|
MSSA.getLiveOnEntryDef());
|
|
|
|
ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
|
|
|
|
MSSA.getLiveOnEntryDef());
|
|
|
|
auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
|
|
|
|
auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
|
|
|
|
ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
|
|
|
|
ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
|
|
|
|
|
|
|
|
auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
|
|
|
|
ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
|
|
|
|
ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
|
|
|
|
|
|
|
|
auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
|
llvm::sort(C.begin(), C.end(), ...) -> llvm::sort(C, ...)
Summary: The convenience wrapper in STLExtras is available since rL342102.
Reviewers: dblaikie, javed.absar, JDevlieghere, andreadb
Subscribers: MatzeB, sanjoy, arsenm, dschuff, mehdi_amini, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, javed.absar, gbedwell, jrtc27, mgrang, atanasyan, steven_wu, george.burgess.iv, dexonsmith, kristina, jsji, llvm-commits
Differential Revision: https://reviews.llvm.org/D52573
llvm-svn: 343163
2018-09-27 04:13:45 +02:00
|
|
|
llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
|
|
|
|
return LHS->getID() < RHS->getID();
|
|
|
|
});
|
2018-08-23 23:29:11 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
auto SortedUserList = [&](const MemoryDef *MD) {
|
|
|
|
std::vector<const MemoryDef *> Result;
|
|
|
|
transform(MD->users(), std::back_inserter(Result),
|
|
|
|
[](const User *U) { return cast<MemoryDef>(U); });
|
|
|
|
SortVecByID(Result);
|
|
|
|
return Result;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Use std::vectors, since they have nice pretty-printing if the test fails.
|
|
|
|
// Parens are necessary because EXPECT_EQ is a macro, and we have commas in
|
|
|
|
// our init lists...
|
|
|
|
EXPECT_EQ(SortedUserList(StoreAAccess),
|
|
|
|
(std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
|
|
|
|
|
|
|
|
EXPECT_EQ(SortedUserList(StoreBAccess),
|
|
|
|
std::vector<const MemoryDef *>{StoreA2Access});
|
|
|
|
|
|
|
|
// StoreAAccess should be present twice, since it uses liveOnEntry for both
|
|
|
|
// its defining and optimized accesses. This is a bit awkward, and is not
|
|
|
|
// relied upon anywhere at the moment. If this is painful, we can fix it.
|
|
|
|
EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
|
|
|
|
(std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
|
|
|
|
StoreBAccess}));
|
|
|
|
}
|
2018-09-10 22:13:01 +02:00
|
|
|
|
|
|
|
// entry
|
|
|
|
// |
|
|
|
|
// header
|
|
|
|
// / \
|
|
|
|
// body |
|
|
|
|
// \ /
|
|
|
|
// exit
|
|
|
|
// header:
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// body:
|
|
|
|
// ; 2 = MemoryDef(1)
|
|
|
|
// exit:
|
|
|
|
// ; 3 = MemoryPhi({body, 2}, {header, 1})
|
|
|
|
// ; 4 = MemoryDef(3); optimized to 3, cannot optimize thorugh phi.
|
|
|
|
// Insert edge: entry -> exit, check mssa Update is correct.
|
|
|
|
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
|
|
|
|
F = Function::Create(
|
|
|
|
FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
|
|
|
|
GlobalValue::ExternalLinkage, "F", &M);
|
|
|
|
Argument *PointerArg = &*F->arg_begin();
|
|
|
|
BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
|
|
|
|
BasicBlock *Header(BasicBlock::Create(C, "header", F));
|
|
|
|
BasicBlock *Body(BasicBlock::Create(C, "body", F));
|
|
|
|
BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
|
|
|
|
B.SetInsertPoint(Entry);
|
|
|
|
BranchInst::Create(Header, Entry);
|
|
|
|
B.SetInsertPoint(Header);
|
|
|
|
B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
B.CreateCondBr(B.getTrue(), Exit, Body);
|
|
|
|
B.SetInsertPoint(Body);
|
|
|
|
B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
BranchInst::Create(Exit, Body);
|
|
|
|
B.SetInsertPoint(Exit);
|
|
|
|
StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
|
|
|
|
setupAnalyses();
|
|
|
|
MemorySSA &MSSA = *Analyses->MSSA;
|
|
|
|
MemorySSAWalker *Walker = Analyses->Walker;
|
|
|
|
std::unique_ptr<MemorySSAUpdater> MSSAU =
|
2019-08-15 17:54:37 +02:00
|
|
|
std::make_unique<MemorySSAUpdater>(&MSSA);
|
2018-09-10 22:13:01 +02:00
|
|
|
|
|
|
|
MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
|
|
|
|
EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
|
|
|
|
|
|
|
|
// Alter CFG, add edge: entry -> exit
|
|
|
|
Entry->getTerminator()->eraseFromParent();
|
|
|
|
B.SetInsertPoint(Entry);
|
|
|
|
B.CreateCondBr(B.getTrue(), Header, Exit);
|
|
|
|
SmallVector<CFGUpdate, 1> Updates;
|
|
|
|
Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
|
|
|
|
Analyses->DT.applyUpdates(Updates);
|
|
|
|
MSSAU->applyInsertUpdates(Updates, Analyses->DT);
|
|
|
|
EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
|
|
|
|
}
|
|
|
|
|
|
|
|
// entry
|
|
|
|
// |
|
|
|
|
// header
|
|
|
|
// / \
|
|
|
|
// body |
|
|
|
|
// \ /
|
|
|
|
// exit
|
|
|
|
// header:
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// body:
|
|
|
|
// ; 2 = MemoryDef(1)
|
|
|
|
// exit:
|
|
|
|
// ; 3 = MemoryPhi({body, 2}, {header, 1})
|
|
|
|
// ; 4 = MemoryDef(3); optimize this to 1 now, added edge should invalidate
|
|
|
|
// the optimized access.
|
|
|
|
// Insert edge: entry -> exit, check mssa Update is correct.
|
|
|
|
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
|
|
|
|
F = Function::Create(
|
|
|
|
FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
|
|
|
|
GlobalValue::ExternalLinkage, "F", &M);
|
|
|
|
Argument *PointerArg = &*F->arg_begin();
|
|
|
|
Type *Int8 = Type::getInt8Ty(C);
|
|
|
|
BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
|
|
|
|
BasicBlock *Header(BasicBlock::Create(C, "header", F));
|
|
|
|
BasicBlock *Body(BasicBlock::Create(C, "body", F));
|
|
|
|
BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
|
|
|
|
|
|
|
|
B.SetInsertPoint(Entry);
|
|
|
|
Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
|
|
|
|
BranchInst::Create(Header, Entry);
|
|
|
|
|
|
|
|
B.SetInsertPoint(Header);
|
|
|
|
StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
B.CreateCondBr(B.getTrue(), Exit, Body);
|
|
|
|
|
|
|
|
B.SetInsertPoint(Body);
|
|
|
|
B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
|
|
|
|
BranchInst::Create(Exit, Body);
|
|
|
|
|
|
|
|
B.SetInsertPoint(Exit);
|
|
|
|
StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
|
|
|
|
setupAnalyses();
|
|
|
|
MemorySSA &MSSA = *Analyses->MSSA;
|
|
|
|
MemorySSAWalker *Walker = Analyses->Walker;
|
|
|
|
std::unique_ptr<MemorySSAUpdater> MSSAU =
|
2019-08-15 17:54:37 +02:00
|
|
|
std::make_unique<MemorySSAUpdater>(&MSSA);
|
2018-09-10 22:13:01 +02:00
|
|
|
|
|
|
|
MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
|
|
|
|
EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
|
|
|
|
|
|
|
|
// Alter CFG, add edge: entry -> exit
|
|
|
|
Entry->getTerminator()->eraseFromParent();
|
|
|
|
B.SetInsertPoint(Entry);
|
|
|
|
B.CreateCondBr(B.getTrue(), Header, Exit);
|
|
|
|
SmallVector<CFGUpdate, 1> Updates;
|
|
|
|
Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
|
|
|
|
Analyses->DT.applyUpdates(Updates);
|
|
|
|
MSSAU->applyInsertUpdates(Updates, Analyses->DT);
|
|
|
|
|
|
|
|
MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
|
|
|
|
EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
|
|
|
|
}
|
|
|
|
|
|
|
|
// entry
|
|
|
|
// / |
|
|
|
|
// a |
|
|
|
|
// / \ |
|
|
|
|
// b c f
|
|
|
|
// \ / |
|
|
|
|
// d |
|
|
|
|
// \ /
|
|
|
|
// e
|
|
|
|
// f:
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// e:
|
|
|
|
// ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
|
|
|
|
//
|
|
|
|
// Insert edge: f -> c, check update is correct.
|
|
|
|
// After update:
|
|
|
|
// f:
|
|
|
|
// ; 1 = MemoryDef(liveOnEntry)
|
|
|
|
// c:
|
|
|
|
// ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
|
|
|
|
// d:
|
|
|
|
// ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
|
|
|
|
// e:
|
|
|
|
// ; 2 = MemoryPhi({d, 4}, {f, 1})
|
|
|
|
TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
|
|
|
|
F = Function::Create(
|
|
|
|
FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
|
|
|
|
GlobalValue::ExternalLinkage, "F", &M);
|
|
|
|
Argument *PointerArg = &*F->arg_begin();
|
|
|
|
BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
|
|
|
|
BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
|
|
|
|
BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
|
|
|
|
BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
|
|
|
|
BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
|
|
|
|
BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
|
|
|
|
BasicBlock *FBlock(BasicBlock::Create(C, "f", F));
|
|
|
|
|
|
|
|
B.SetInsertPoint(Entry);
|
|
|
|
B.CreateCondBr(B.getTrue(), ABlock, FBlock);
|
|
|
|
B.SetInsertPoint(ABlock);
|
|
|
|
B.CreateCondBr(B.getTrue(), BBlock, CBlock);
|
|
|
|
B.SetInsertPoint(BBlock);
|
|
|
|
BranchInst::Create(DBlock, BBlock);
|
|
|
|
B.SetInsertPoint(CBlock);
|
|
|
|
BranchInst::Create(DBlock, CBlock);
|
|
|
|
B.SetInsertPoint(DBlock);
|
|
|
|
BranchInst::Create(EBlock, DBlock);
|
|
|
|
B.SetInsertPoint(FBlock);
|
|
|
|
B.CreateStore(B.getInt8(16), PointerArg);
|
|
|
|
BranchInst::Create(EBlock, FBlock);
|
|
|
|
|
|
|
|
setupAnalyses();
|
|
|
|
MemorySSA &MSSA = *Analyses->MSSA;
|
|
|
|
std::unique_ptr<MemorySSAUpdater> MSSAU =
|
2019-08-15 17:54:37 +02:00
|
|
|
std::make_unique<MemorySSAUpdater>(&MSSA);
|
2018-09-10 22:13:01 +02:00
|
|
|
|
|
|
|
// Alter CFG, add edge: f -> c
|
|
|
|
FBlock->getTerminator()->eraseFromParent();
|
|
|
|
B.SetInsertPoint(FBlock);
|
|
|
|
B.CreateCondBr(B.getTrue(), CBlock, EBlock);
|
|
|
|
SmallVector<CFGUpdate, 1> Updates;
|
|
|
|
Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
|
|
|
|
Analyses->DT.applyUpdates(Updates);
|
|
|
|
MSSAU->applyInsertUpdates(Updates, Analyses->DT);
|
|
|
|
|
|
|
|
MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
|
|
|
|
EXPECT_NE(MPC, nullptr);
|
|
|
|
MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
|
|
|
|
EXPECT_NE(MPD, nullptr);
|
|
|
|
MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
|
|
|
|
EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
|
|
|
|
}
|