[SCEV] Move ScalarEvolutionExpander.cpp to Transforms/Utils (NFC).
SCEVExpander modifies the underlying function, so it is a better fit for Transforms/Utils than for Analysis. This also allows SCEVExpander to use other transform utilities.

Reviewers: sanjoy.google, efriedma, reames

Reviewed By: sanjoy.google

Differential Revision: https://reviews.llvm.org/D71537
This commit is contained in:
parent 6d1c7ae7b3
commit 088559d18d
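For downstream code that uses SCEVExpander, the visible change is the header location; the class and its interface are unchanged. A minimal sketch of the updated usage follows (SE, DL, S, and InsertPt are placeholder names for an existing ScalarEvolution result, the module's DataLayout, a SCEV, and an insertion-point instruction; they are not identifiers from this patch):

    // The expander now lives in Transforms/Utils rather than Analysis.
    // Old: #include "llvm/Analysis/ScalarEvolutionExpander.h"
    #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

    // Expand a SCEV back into IR immediately before InsertPt; this is the
    // same constructor / expandCodeFor pattern used by the unit tests in
    // this patch.
    SCEVExpander Expander(SE, DL, "expander");
    Value *Expanded = Expander.expandCodeFor(S, S->getType(), InsertPt);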
@@ -1,4 +1,4 @@
-//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
+//===- Transforms/Utils/ScalarEvolutionExpander.h - SCEV Expander -*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

@@ -82,7 +82,6 @@ add_llvm_component_library(LLVMAnalysis
   RegionPrinter.cpp
   ScalarEvolution.cpp
   ScalarEvolutionAliasAnalysis.cpp
-  ScalarEvolutionExpander.cpp
   ScalarEvolutionNormalization.cpp
   StackSafetyAnalysis.cpp
   SyncDependenceAnalysis.cpp
@@ -30,7 +30,6 @@
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -59,6 +58,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>

@@ -20,7 +20,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
@@ -43,6 +42,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

 #define DEBUG_TYPE "hardware-loops"
@@ -26,7 +26,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
@@ -36,6 +35,7 @@
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

 using namespace llvm;

@@ -20,7 +20,6 @@
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -57,6 +56,7 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <algorithm>
 #include <array>
 #include <cassert>
@@ -53,7 +53,6 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
@@ -74,6 +73,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <cassert>
 #include <iterator>
 #include <utility>

@@ -39,7 +39,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -81,6 +80,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
 #include <cassert>
 #include <cstdint>
@@ -56,7 +56,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/CFG.h"
@@ -87,6 +86,7 @@
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/LoopSimplify.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 #include <algorithm>
 #include <cassert>

@@ -21,7 +21,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
 #include "llvm/IR/CFG.h"
@@ -32,6 +31,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 using namespace llvm;
@@ -53,7 +53,6 @@
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -91,6 +90,7 @@
 #include "llvm/Transforms/Utils/BuildLibCalls.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>

@@ -38,7 +38,6 @@
 #include "llvm/Analysis/MemorySSA.h"
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -58,6 +57,7 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils.h"
 #include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/SizeOpts.h"
 #include <algorithm>
 #include <cassert>
@@ -184,7 +184,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GlobalValue.h"
@@ -199,6 +198,7 @@
 #include "llvm/Transforms/Utils/GuardUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

 #define DEBUG_TYPE "loop-predication"

@@ -24,7 +24,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
@@ -55,6 +54,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <cassert>
 #include <cstddef>
 #include <cstdint>
@@ -70,7 +70,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/ScalarEvolutionNormalization.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -109,6 +108,7 @@
 #include "llvm/Transforms/Utils.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include <algorithm>
 #include <cassert>
 #include <cstddef>

@@ -52,6 +52,7 @@ add_llvm_component_library(LLVMTransformUtils
   SSAUpdater.cpp
   SSAUpdaterBulk.cpp
   SanitizerStats.cpp
+  ScalarEvolutionExpander.cpp
   SimplifyCFG.cpp
   SimplifyIndVar.cpp
   SimplifyLibCalls.cpp
@@ -25,7 +25,6 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/LoopIterator.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Metadata.h"
@@ -37,6 +36,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/UnrollLoop.h"
 #include <algorithm>

@@ -15,13 +15,13 @@
 #include "llvm/Transforms/Utils/LoopVersioning.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
 #include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

 using namespace llvm;
@@ -12,7 +12,7 @@
 //
 //===----------------------------------------------------------------------===//

-#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/Analysis/InstructionSimplify.h"

@@ -17,7 +17,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/IRBuilder.h"
@@ -27,6 +26,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

 using namespace llvm;
@@ -91,7 +91,6 @@
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/Analysis/ProfileSummaryInfo.h"
 #include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/TargetTransformInfo.h"
@@ -137,6 +136,7 @@
 #include "llvm/Transforms/Utils/LoopSimplify.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
 #include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Utils/SizeOpts.h"
 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
 #include <algorithm>

@@ -9,8 +9,8 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/ScalarEvolutionNormalization.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Constants.h"
@ -141,72 +141,6 @@ TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
|
||||
EXPECT_EQ(S1, S2);
|
||||
}
|
||||
|
||||
TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
|
||||
// It is to test the fix for PR30213. It exercises the branch in scev
|
||||
// expansion when the value in ValueOffsetPair is a ptr and the offset
|
||||
// is not divisible by the elem type size of value.
|
||||
auto *I8Ty = Type::getInt8Ty(Context);
|
||||
auto *I8PtrTy = Type::getInt8PtrTy(Context);
|
||||
auto *I32Ty = Type::getInt32Ty(Context);
|
||||
auto *I32PtrTy = Type::getInt32PtrTy(Context);
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
|
||||
BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
|
||||
BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
|
||||
BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
|
||||
BranchInst::Create(LoopBB, EntryBB);
|
||||
ReturnInst::Create(Context, nullptr, ExitBB);
|
||||
|
||||
// loop: ; preds = %loop, %entry
|
||||
// %alloca = alloca i32
|
||||
// %gep0 = getelementptr i32, i32* %alloca, i32 1
|
||||
// %bitcast1 = bitcast i32* %gep0 to i8*
|
||||
// %gep1 = getelementptr i8, i8* %bitcast1, i32 1
|
||||
// %gep2 = getelementptr i8, i8* undef, i32 1
|
||||
// %cmp = icmp ult i8* undef, %bitcast1
|
||||
// %select = select i1 %cmp, i8* %gep1, i8* %gep2
|
||||
// %bitcast2 = bitcast i8* %select to i32*
|
||||
// br i1 undef, label %loop, label %exit
|
||||
|
||||
const DataLayout &DL = F->getParent()->getDataLayout();
|
||||
BranchInst *Br = BranchInst::Create(
|
||||
LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
|
||||
AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
|
||||
"alloca", Br);
|
||||
ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
|
||||
GetElementPtrInst *Gep0 =
|
||||
GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
|
||||
CastInst *CastA =
|
||||
CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
|
||||
GetElementPtrInst *Gep1 =
|
||||
GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
|
||||
GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
|
||||
I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
|
||||
CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
|
||||
UndefValue::get(I8PtrTy), CastA, "cmp", Br);
|
||||
SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
|
||||
CastInst *CastB =
|
||||
CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
auto *S = SE.getSCEV(CastB);
|
||||
SCEVExpander Exp(SE, M.getDataLayout(), "expander");
|
||||
Value *V =
|
||||
Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);
|
||||
|
||||
// Expect the expansion code contains:
|
||||
// %0 = bitcast i32* %bitcast2 to i8*
|
||||
// %uglygep = getelementptr i8, i8* %0, i64 -1
|
||||
// %1 = bitcast i8* %uglygep to i32*
|
||||
EXPECT_TRUE(isa<BitCastInst>(V));
|
||||
Instruction *Gep = cast<Instruction>(V)->getPrevNode();
|
||||
EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
|
||||
EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
|
||||
EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
|
||||
EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
|
||||
}
|
||||
|
||||
static Instruction *getInstructionByName(Function &F, StringRef Name) {
|
||||
for (auto &I : instructions(F))
|
||||
if (I.getName() == Name)
|
||||
@ -719,83 +653,6 @@ TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
|
||||
SE.getZeroExtendExpr(S, I128Ty);
|
||||
}
|
||||
|
||||
// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
|
||||
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 addrspace(10)* %arg)
|
||||
* top:
|
||||
* br label %L.ph
|
||||
* L.ph:
|
||||
* br label %L
|
||||
* L:
|
||||
* %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
|
||||
* %add = add i64 %phi2, 1
|
||||
* br i1 undef, label %post, label %L2
|
||||
* post:
|
||||
* %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
|
||||
* #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
|
||||
* ret void
|
||||
*
|
||||
* We will create the appropriate SCEV expression for %gep and expand it,
|
||||
* then check that no inttoptr/ptrtoint instructions got inserted.
|
||||
*/
|
||||
|
||||
// Create a module with non-integral pointers in it's datalayout
|
||||
Module NIM("nonintegral", Context);
|
||||
std::string DataLayout = M.getDataLayoutStr();
|
||||
if (!DataLayout.empty())
|
||||
DataLayout += "-";
|
||||
DataLayout += "ni:10";
|
||||
NIM.setDataLayout(DataLayout);
|
||||
|
||||
Type *T_int1 = Type::getInt1Ty(Context);
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
Type *T_pint64 = T_int64->getPointerTo(10);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
|
||||
|
||||
Argument *Arg = &*F->arg_begin();
|
||||
|
||||
BasicBlock *Top = BasicBlock::Create(Context, "top", F);
|
||||
BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
|
||||
BasicBlock *L = BasicBlock::Create(Context, "L", F);
|
||||
BasicBlock *Post = BasicBlock::Create(Context, "post", F);
|
||||
|
||||
IRBuilder<> Builder(Top);
|
||||
Builder.CreateBr(LPh);
|
||||
|
||||
Builder.SetInsertPoint(LPh);
|
||||
Builder.CreateBr(L);
|
||||
|
||||
Builder.SetInsertPoint(L);
|
||||
PHINode *Phi = Builder.CreatePHI(T_int64, 2);
|
||||
Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
|
||||
Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
|
||||
Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
|
||||
Phi->addIncoming(Add, L);
|
||||
|
||||
Builder.SetInsertPoint(Post);
|
||||
Value *GepBase =
|
||||
Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
|
||||
Instruction *Ret = Builder.CreateRetVoid();
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
auto *AddRec =
|
||||
SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
|
||||
LI->getLoopFor(L), SCEV::FlagNUW);
|
||||
|
||||
SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
|
||||
Exp.disableCanonicalMode();
|
||||
Exp.expandCodeFor(AddRec, T_pint64, Ret);
|
||||
|
||||
// Make sure none of the instructions inserted were inttoptr/ptrtoint.
|
||||
// The verifier will check this.
|
||||
EXPECT_FALSE(verifyFunction(*F, &errs()));
|
||||
}
|
||||
|
||||
// Make sure that SCEV invalidates exit limits after invalidating the values it
|
||||
// depends on when we forget a loop.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
|
||||
@ -1122,283 +979,6 @@ TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
|
||||
EXPECT_EQ(Expr, ZeroConst);
|
||||
}
|
||||
|
||||
// Check that we can correctly identify the points at which the SCEV of the
|
||||
// AddRec can be expanded.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 addrspace(10)* %arg)
|
||||
* top:
|
||||
* br label %L.ph
|
||||
* L.ph:
|
||||
* br label %L
|
||||
* L:
|
||||
* %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
|
||||
* %add = add i64 %phi2, 1
|
||||
* %cond = icmp slt i64 %add, 1000; then becomes 2000.
|
||||
* br i1 %cond, label %post, label %L2
|
||||
* post:
|
||||
* ret void
|
||||
*
|
||||
*/
|
||||
|
||||
// Create a module with non-integral pointers in it's datalayout
|
||||
Module NIM("nonintegral", Context);
|
||||
std::string DataLayout = M.getDataLayoutStr();
|
||||
if (!DataLayout.empty())
|
||||
DataLayout += "-";
|
||||
DataLayout += "ni:10";
|
||||
NIM.setDataLayout(DataLayout);
|
||||
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
Type *T_pint64 = T_int64->getPointerTo(10);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);
|
||||
|
||||
BasicBlock *Top = BasicBlock::Create(Context, "top", F);
|
||||
BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
|
||||
BasicBlock *L = BasicBlock::Create(Context, "L", F);
|
||||
BasicBlock *Post = BasicBlock::Create(Context, "post", F);
|
||||
|
||||
IRBuilder<> Builder(Top);
|
||||
Builder.CreateBr(LPh);
|
||||
|
||||
Builder.SetInsertPoint(LPh);
|
||||
Builder.CreateBr(L);
|
||||
|
||||
Builder.SetInsertPoint(L);
|
||||
PHINode *Phi = Builder.CreatePHI(T_int64, 2);
|
||||
auto *Add = cast<Instruction>(
|
||||
Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
|
||||
auto *Limit = ConstantInt::get(T_int64, 1000);
|
||||
auto *Cond = cast<Instruction>(
|
||||
Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
|
||||
Builder.CreateCondBr(Cond, L, Post);
|
||||
Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
|
||||
Phi->addIncoming(Add, L);
|
||||
|
||||
Builder.SetInsertPoint(Post);
|
||||
Builder.CreateRetVoid();
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
const SCEV *S = SE.getSCEV(Phi);
|
||||
EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
|
||||
const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
|
||||
EXPECT_TRUE(AR->isAffine());
|
||||
EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
|
||||
EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
|
||||
EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
|
||||
EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
|
||||
}
|
||||
|
||||
// Check that SCEV expander does not use the nuw instruction
|
||||
// for expansion.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 %a)
|
||||
* entry:
|
||||
* br false, label %exit, label %body
|
||||
* body:
|
||||
* %s1 = add i64 %a, -1
|
||||
* br label %exit
|
||||
* exit:
|
||||
* %s = add nuw i64 %a, -1
|
||||
* ret %s
|
||||
*/
|
||||
|
||||
// Create a module.
|
||||
Module M("SCEVExpanderNUW", Context);
|
||||
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
|
||||
Argument *Arg = &*F->arg_begin();
|
||||
ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
|
||||
|
||||
BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
|
||||
BasicBlock *Body = BasicBlock::Create(Context, "body", F);
|
||||
BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
|
||||
|
||||
IRBuilder<> Builder(Entry);
|
||||
ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
|
||||
Builder.CreateCondBr(Cond, Exit, Body);
|
||||
|
||||
Builder.SetInsertPoint(Body);
|
||||
auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
Builder.CreateBr(Exit);
|
||||
|
||||
Builder.SetInsertPoint(Exit);
|
||||
auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
S2->setHasNoUnsignedWrap(true);
|
||||
auto *R = cast<Instruction>(Builder.CreateRetVoid());
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
const SCEV *S = SE.getSCEV(S1);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(S));
|
||||
SCEVExpander Exp(SE, M.getDataLayout(), "expander");
|
||||
auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
|
||||
EXPECT_FALSE(I->hasNoUnsignedWrap());
|
||||
}
|
||||
|
||||
// Check that SCEV expander does not use the nsw instruction
|
||||
// for expansion.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 %a)
|
||||
* entry:
|
||||
* br false, label %exit, label %body
|
||||
* body:
|
||||
* %s1 = add i64 %a, -1
|
||||
* br label %exit
|
||||
* exit:
|
||||
* %s = add nsw i64 %a, -1
|
||||
* ret %s
|
||||
*/
|
||||
|
||||
// Create a module.
|
||||
Module M("SCEVExpanderNSW", Context);
|
||||
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
|
||||
Argument *Arg = &*F->arg_begin();
|
||||
ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
|
||||
|
||||
BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
|
||||
BasicBlock *Body = BasicBlock::Create(Context, "body", F);
|
||||
BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
|
||||
|
||||
IRBuilder<> Builder(Entry);
|
||||
ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
|
||||
Builder.CreateCondBr(Cond, Exit, Body);
|
||||
|
||||
Builder.SetInsertPoint(Body);
|
||||
auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
Builder.CreateBr(Exit);
|
||||
|
||||
Builder.SetInsertPoint(Exit);
|
||||
auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
S2->setHasNoSignedWrap(true);
|
||||
auto *R = cast<Instruction>(Builder.CreateRetVoid());
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
const SCEV *S = SE.getSCEV(S1);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(S));
|
||||
SCEVExpander Exp(SE, M.getDataLayout(), "expander");
|
||||
auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
|
||||
EXPECT_FALSE(I->hasNoSignedWrap());
|
||||
}
|
||||
|
||||
// Check that SCEV does not save the SCEV -> V
|
||||
// mapping of SCEV differ from V in NUW flag.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 %a)
|
||||
* entry:
|
||||
* %s1 = add i64 %a, -1
|
||||
* %s2 = add nuw i64 %a, -1
|
||||
* br label %exit
|
||||
* exit:
|
||||
* ret %s
|
||||
*/
|
||||
|
||||
// Create a module.
|
||||
Module M("SCEVCacheNUW", Context);
|
||||
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
|
||||
Argument *Arg = &*F->arg_begin();
|
||||
ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
|
||||
|
||||
BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
|
||||
BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
|
||||
|
||||
IRBuilder<> Builder(Entry);
|
||||
auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
S2->setHasNoUnsignedWrap(true);
|
||||
Builder.CreateBr(Exit);
|
||||
|
||||
Builder.SetInsertPoint(Exit);
|
||||
auto *R = cast<Instruction>(Builder.CreateRetVoid());
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
// Get S2 first to move it to cache.
|
||||
const SCEV *SC2 = SE.getSCEV(S2);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
|
||||
// Now get S1.
|
||||
const SCEV *SC1 = SE.getSCEV(S1);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
|
||||
// Expand for S1, it should use S1 not S2 in spite S2
|
||||
// first in the cache.
|
||||
SCEVExpander Exp(SE, M.getDataLayout(), "expander");
|
||||
auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
|
||||
EXPECT_FALSE(I->hasNoUnsignedWrap());
|
||||
}
|
||||
|
||||
// Check that SCEV does not save the SCEV -> V
|
||||
// mapping of SCEV differ from V in NSW flag.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
|
||||
/*
|
||||
* Create the following code:
|
||||
* func(i64 %a)
|
||||
* entry:
|
||||
* %s1 = add i64 %a, -1
|
||||
* %s2 = add nsw i64 %a, -1
|
||||
* br label %exit
|
||||
* exit:
|
||||
* ret %s
|
||||
*/
|
||||
|
||||
// Create a module.
|
||||
Module M("SCEVCacheNUW", Context);
|
||||
|
||||
Type *T_int64 = Type::getInt64Ty(Context);
|
||||
|
||||
FunctionType *FTy =
|
||||
FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
|
||||
Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
|
||||
Argument *Arg = &*F->arg_begin();
|
||||
ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
|
||||
|
||||
BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
|
||||
BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
|
||||
|
||||
IRBuilder<> Builder(Entry);
|
||||
auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
|
||||
S2->setHasNoSignedWrap(true);
|
||||
Builder.CreateBr(Exit);
|
||||
|
||||
Builder.SetInsertPoint(Exit);
|
||||
auto *R = cast<Instruction>(Builder.CreateRetVoid());
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
// Get S2 first to move it to cache.
|
||||
const SCEV *SC2 = SE.getSCEV(S2);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
|
||||
// Now get S1.
|
||||
const SCEV *SC1 = SE.getSCEV(S1);
|
||||
EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
|
||||
// Expand for S1, it should use S1 not S2 in spite S2
|
||||
// first in the cache.
|
||||
SCEVExpander Exp(SE, M.getDataLayout(), "expander");
|
||||
auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
|
||||
EXPECT_FALSE(I->hasNoSignedWrap());
|
||||
}
|
||||
|
||||
// Check logic of SCEV expression size computation.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
|
||||
/*
|
||||
@ -1449,240 +1029,6 @@ TEST_F(ScalarEvolutionsTest, SCEVComputeExpressionSize) {
|
||||
EXPECT_EQ(S2S->getExpressionSize(), 5u);
|
||||
}
|
||||
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
|
||||
LLVMContext C;
|
||||
SMDiagnostic Err;
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop without a canonical IV.
|
||||
// SCEVExpander will insert one.
|
||||
auto TestNoCanonicalIV = [&](
|
||||
std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
|
||||
std::unique_ptr<Module> M =
|
||||
parseAssemblyString("define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
auto *Loop = LI.getLoopFor(I.getParent());
|
||||
EXPECT_FALSE(Loop->getCanonicalInductionVariable());
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
|
||||
unsigned CanonicalIVBitWidth =
|
||||
cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
|
||||
EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
|
||||
});
|
||||
};
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop with a canonical IV
|
||||
// which is narrower than addrec type.
|
||||
// SCEVExpander will insert a canonical IV of a wider type to expand the
|
||||
// addrec.
|
||||
auto TestNarrowCanonicalIV = [&](
|
||||
std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec) {
|
||||
std::unique_ptr<Module> M = parseAssemblyString(
|
||||
"define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %canonical.iv.inc = add i8 %canonical.iv, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
|
||||
auto *LoopHeaderBB = I.getParent();
|
||||
auto *Loop = LI.getLoopFor(LoopHeaderBB);
|
||||
PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
|
||||
EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
|
||||
unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
|
||||
unsigned CanonicalIVBitWidth =
|
||||
cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
|
||||
EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
|
||||
// Loop over all of the PHI nodes, looking for the new canonical indvar.
|
||||
PHINode *NewCanonicalIV = nullptr;
|
||||
for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
|
||||
++i) {
|
||||
PHINode *PN = cast<PHINode>(i);
|
||||
if (PN == &I || PN == CanonicalIV)
|
||||
continue;
|
||||
// We expect that the only PHI added is the new canonical IV
|
||||
EXPECT_FALSE(NewCanonicalIV);
|
||||
NewCanonicalIV = PN;
|
||||
}
|
||||
|
||||
// Check that NewCanonicalIV is a canonical IV, i.e {0,+,1}
|
||||
BasicBlock *Incoming = nullptr, *Backedge = nullptr;
|
||||
EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
|
||||
auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
|
||||
EXPECT_TRUE(isa<ConstantInt>(Start));
|
||||
EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
|
||||
auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
|
||||
EXPECT_TRUE(isa<BinaryOperator>(Next));
|
||||
auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
|
||||
EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
|
||||
EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
|
||||
auto *Step = NextBinOp->getOperand(1);
|
||||
EXPECT_TRUE(isa<ConstantInt>(Step));
|
||||
EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());
|
||||
|
||||
unsigned NewCanonicalIVBitWidth =
|
||||
cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
|
||||
EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
|
||||
});
|
||||
};
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop with a canonical IV
|
||||
// of addrec width.
|
||||
// To expand the addrec SCEVExpander should use the existing canonical IV.
|
||||
auto TestMatchingCanonicalIV = [&](
|
||||
std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
|
||||
unsigned ARBitWidth) {
|
||||
auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
|
||||
std::unique_ptr<Module> M = parseAssemblyString(
|
||||
"define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %canonical.iv = phi " + ARBitWidthTypeStr +
|
||||
" [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %canonical.iv.inc = add " + ARBitWidthTypeStr +
|
||||
" %canonical.iv, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
auto &CanonicalIV = GetInstByName(F, "canonical.iv");
|
||||
|
||||
auto *LoopHeaderBB = I.getParent();
|
||||
auto *Loop = LI.getLoopFor(LoopHeaderBB);
|
||||
EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
|
||||
unsigned CanonicalIVBitWidth =
|
||||
cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
|
||||
EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
|
||||
// Loop over all of the PHI nodes, looking if a new canonical indvar was
|
||||
// introduced.
|
||||
PHINode *NewCanonicalIV = nullptr;
|
||||
for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
|
||||
++i) {
|
||||
PHINode *PN = cast<PHINode>(i);
|
||||
if (PN == &I || PN == &CanonicalIV)
|
||||
continue;
|
||||
NewCanonicalIV = PN;
|
||||
}
|
||||
EXPECT_FALSE(NewCanonicalIV);
|
||||
});
|
||||
};
|
||||
|
||||
unsigned ARBitWidth = 16;
|
||||
Type *ARType = IntegerType::get(C, ARBitWidth);
|
||||
|
||||
// Expand {5,+,1}
|
||||
auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
|
||||
return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
|
||||
SE.getOne(ARType), L, SCEV::FlagAnyWrap);
|
||||
};
|
||||
TestNoCanonicalIV(GetAR2);
|
||||
TestNarrowCanonicalIV(GetAR2);
|
||||
TestMatchingCanonicalIV(GetAR2, ARBitWidth);
|
||||
}
|
||||
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {
|
||||
|
||||
auto checkOneCase = [this](std::string &&str) {
|
||||
LLVMContext C;
|
||||
SMDiagnostic Err;
|
||||
std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
Function *F = M->getFunction("f");
|
||||
ASSERT_NE(F, nullptr) << "Could not find function 'f'";
|
||||
|
||||
BasicBlock &Entry = F->getEntryBlock();
|
||||
LoadInst *Load = cast<LoadInst>(&Entry.front());
|
||||
BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());
|
||||
|
||||
ScalarEvolution SE = buildSE(*F);
|
||||
const SCEV *AndSCEV = SE.getSCEV(And);
|
||||
EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
|
||||
EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
|
||||
EXPECT_EQ(I->getOpcode(), Instruction::Shl);
|
||||
EXPECT_FALSE(I->hasNoSignedWrap());
|
||||
};
|
||||
|
||||
checkOneCase("define void @f(i16* %arrayidx) { "
|
||||
" %1 = load i16, i16* %arrayidx "
|
||||
" %2 = and i16 %1, -32768 "
|
||||
" ret void "
|
||||
"} ");
|
||||
|
||||
checkOneCase("define void @f(i8* %arrayidx) { "
|
||||
" %1 = load i8, i8* %arrayidx "
|
||||
" %2 = and i8 %1, -128 "
|
||||
" ret void "
|
||||
"} ");
|
||||
}
|
||||
|
||||
TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
|
||||
LLVMContext C;
|
||||
SMDiagnostic Err;
|
||||
@ -1745,186 +1091,4 @@ TEST_F(ScalarEvolutionsTest, SCEVComputeConstantDifference) {
|
||||
});
|
||||
}
|
||||
|
||||
// Test expansion of nested addrecs in CanonicalMode.
|
||||
// Expanding nested addrecs in canonical mode requiers a canonical IV of a
|
||||
// type wider than the type of the addrec itself. Currently, SCEVExpander
|
||||
// just falls back to literal mode for nested addrecs.
|
||||
TEST_F(ScalarEvolutionsTest, SCEVExpandNonAffineAddRec) {
|
||||
LLVMContext C;
|
||||
SMDiagnostic Err;
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop without a canonical IV.
|
||||
auto TestNoCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
|
||||
ScalarEvolution & SE, Loop * L)> GetAddRec) {
|
||||
std::unique_ptr<Module> M =
|
||||
parseAssemblyString("define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
auto *Loop = LI.getLoopFor(I.getParent());
|
||||
EXPECT_FALSE(Loop->getCanonicalInductionVariable());
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
EXPECT_FALSE(AR->isAffine());
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
auto *ExpandedAR = SE.getSCEV(V);
|
||||
// Check that the expansion happened literally.
|
||||
EXPECT_EQ(AR, ExpandedAR);
|
||||
});
|
||||
};
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop with a canonical IV
|
||||
// which is narrower than addrec type.
|
||||
auto TestNarrowCanonicalIV = [&](
|
||||
std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
|
||||
GetAddRec) {
|
||||
std::unique_ptr<Module> M = parseAssemblyString(
|
||||
"define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %canonical.iv.inc = add i8 %canonical.iv, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
|
||||
auto *LoopHeaderBB = I.getParent();
|
||||
auto *Loop = LI.getLoopFor(LoopHeaderBB);
|
||||
PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
|
||||
EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
EXPECT_FALSE(AR->isAffine());
|
||||
|
||||
unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
|
||||
unsigned CanonicalIVBitWidth =
|
||||
cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
|
||||
EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
auto *ExpandedAR = SE.getSCEV(V);
|
||||
// Check that the expansion happened literally.
|
||||
EXPECT_EQ(AR, ExpandedAR);
|
||||
});
|
||||
};
|
||||
|
||||
// Expand the addrec produced by GetAddRec into a loop with a canonical IV
|
||||
// of addrec width.
|
||||
auto TestMatchingCanonicalIV = [&](
|
||||
std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
|
||||
GetAddRec,
|
||||
unsigned ARBitWidth) {
|
||||
auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
|
||||
std::unique_ptr<Module> M = parseAssemblyString(
|
||||
"define i32 @test(i32 %limit) { "
|
||||
"entry: "
|
||||
" br label %loop "
|
||||
"loop: "
|
||||
" %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
|
||||
" %canonical.iv = phi " + ARBitWidthTypeStr +
|
||||
" [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
|
||||
" %i.inc = add nsw i32 %i, 1 "
|
||||
" %canonical.iv.inc = add " + ARBitWidthTypeStr +
|
||||
" %canonical.iv, 1 "
|
||||
" %cont = icmp slt i32 %i.inc, %limit "
|
||||
" br i1 %cont, label %loop, label %exit "
|
||||
"exit: "
|
||||
" ret i32 %i.inc "
|
||||
"}",
|
||||
Err, C);
|
||||
|
||||
assert(M && "Could not parse module?");
|
||||
assert(!verifyModule(*M) && "Must have been well formed!");
|
||||
|
||||
runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
|
||||
auto &I = GetInstByName(F, "i");
|
||||
auto &CanonicalIV = GetInstByName(F, "canonical.iv");
|
||||
|
||||
auto *LoopHeaderBB = I.getParent();
|
||||
auto *Loop = LI.getLoopFor(LoopHeaderBB);
|
||||
EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
|
||||
unsigned CanonicalIVBitWidth =
|
||||
cast<IntegerType>(CanonicalIV.getType())->getBitWidth();
|
||||
|
||||
auto *AR = GetAddRec(SE, Loop);
|
||||
EXPECT_FALSE(AR->isAffine());
|
||||
EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
|
||||
EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);
|
||||
|
||||
SCEVExpander Exp(SE, M->getDataLayout(), "expander");
|
||||
auto *InsertAt = I.getNextNode();
|
||||
Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
|
||||
auto *ExpandedAR = SE.getSCEV(V);
|
||||
// Check that the expansion happened literally.
|
||||
EXPECT_EQ(AR, ExpandedAR);
|
||||
});
|
||||
};
|
||||
|
||||
unsigned ARBitWidth = 16;
|
||||
Type *ARType = IntegerType::get(C, ARBitWidth);
|
||||
|
||||
// Expand {5,+,1,+,1}
|
||||
auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
|
||||
SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
|
||||
SE.getOne(ARType), SE.getOne(ARType)};
|
||||
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
|
||||
};
|
||||
TestNoCanonicalIV(GetAR3);
|
||||
TestNarrowCanonicalIV(GetAR3);
|
||||
TestMatchingCanonicalIV(GetAR3, ARBitWidth);
|
||||
|
||||
// Expand {5,+,1,+,1,+,1}
|
||||
auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
|
||||
SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
|
||||
SE.getOne(ARType), SE.getOne(ARType),
|
||||
SE.getOne(ARType)};
|
||||
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
|
||||
};
|
||||
TestNoCanonicalIV(GetAR4);
|
||||
TestNarrowCanonicalIV(GetAR4);
|
||||
TestMatchingCanonicalIV(GetAR4, ARBitWidth);
|
||||
|
||||
// Expand {5,+,1,+,1,+,1,+,1}
|
||||
auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
|
||||
SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
|
||||
SE.getOne(ARType), SE.getOne(ARType),
|
||||
SE.getOne(ARType), SE.getOne(ARType)};
|
||||
return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
|
||||
};
|
||||
TestNoCanonicalIV(GetAR5);
|
||||
TestNarrowCanonicalIV(GetAR5);
|
||||
TestMatchingCanonicalIV(GetAR5, ARBitWidth);
|
||||
}
|
||||
|
||||
} // end namespace llvm
|
||||
|
@@ -17,6 +17,7 @@ add_llvm_unittest(UtilsTests
   LocalTest.cpp
   LoopUtilsTest.cpp
   SizeOptsTest.cpp
+  ScalarEvolutionExpanderTest.cpp
   SSAUpdaterBulkTest.cpp
   UnrollLoopTest.cpp
   ValueMapperTest.cpp
@@ -11,7 +11,7 @@
 #include "llvm/Analysis/DependenceAnalysis.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/PostDominators.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/LLVMContext.h"
unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp (new file, 923 lines)
@@ -0,0 +1,923 @@
//===- ScalarEvolutionExpanderTest.cpp - ScalarEvolution unit tests -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

// We use this fixture to ensure that we clean up ScalarEvolution before
// deleting the PassManager.
class ScalarEvolutionsTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}

  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }

  static Optional<APInt> computeConstantDifference(ScalarEvolution &SE,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
    return SE.computeConstantDifference(LHS, RHS);
  }
};

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instructions!");
}

TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
  // It is to test the fix for PR30213. It exercises the branch in scev
  // expansion when the value in ValueOffsetPair is a ptr and the offset
  // is not divisible by the elem type size of value.
  auto *I8Ty = Type::getInt8Ty(Context);
  auto *I8PtrTy = Type::getInt8PtrTy(Context);
  auto *I32Ty = Type::getInt32Ty(Context);
  auto *I32PtrTy = Type::getInt32PtrTy(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  ReturnInst::Create(Context, nullptr, ExitBB);

  // loop:                 ; preds = %loop, %entry
  //   %alloca = alloca i32
  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
  //   %bitcast1 = bitcast i32* %gep0 to i8*
  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
  //   %gep2 = getelementptr i8, i8* undef, i32 1
  //   %cmp = icmp ult i8* undef, %bitcast1
  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
  //   %bitcast2 = bitcast i8* %select to i32*
  //   br i1 undef, label %loop, label %exit

  const DataLayout &DL = F->getParent()->getDataLayout();
  BranchInst *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  AllocaInst *Alloca =
      new AllocaInst(I32Ty, DL.getAllocaAddrSpace(), "alloca", Br);
  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
  GetElementPtrInst *Gep0 =
      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
  CastInst *CastA =
      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
  GetElementPtrInst *Gep1 =
      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
  CastInst *CastB =
      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);

  ScalarEvolution SE = buildSE(*F);
  auto *S = SE.getSCEV(CastB);
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Value *V =
      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);

  // Expect the expansion code contains:
  //   %0 = bitcast i32* %bitcast2 to i8*
  //   %uglygep = getelementptr i8, i8* %0, i64 -1
  //   %1 = bitcast i8* %uglygep to i32*
  EXPECT_TRUE(isa<BitCastInst>(V));
  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
}

// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L2 ]
   *  %add = add i64 %phi2, 1
   *  br i1 undef, label %post, label %L2
   * post:
   *  %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
   *  #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
   *  ret void
   *
   * We will create the appropriate SCEV expression for %gep and expand it,
   * then check that no inttoptr/ptrtoint instructions got inserted.
   */

  // Create a module with non-integral pointers in it's datalayout
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int1 = Type::getInt1Ty(Context);
  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Value *GepBase =
      Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  auto *AddRec =
      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
                       LI->getLoopFor(L), SCEV::FlagNUW);

  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
  Exp.disableCanonicalMode();
  Exp.expandCodeFor(AddRec, T_pint64, Ret);

  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
  // The verifier will check this.
  EXPECT_FALSE(verifyFunction(*F, &errs()));
}

// Check that we can correctly identify the points at which the SCEV of the
// AddRec can be expanded.
TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *  br label %L.ph
   * L.ph:
   *  br label %L
   * L:
   *  %phi = phi i64 [i64 0, %L.ph], [ %add, %L ]
   *  %add = add i64 %phi, 1
   *  %cond = icmp slt i64 %add, 1000
   *  br i1 %cond, label %L, label %post
   * post:
   *  ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  Builder.CreateCondBr(Cond, L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Phi);
  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  EXPECT_TRUE(AR->isAffine());
  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
}

// Check that the SCEV expander does not reuse the existing `add nuw`
// instruction for the expansion.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s = add nuw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the SCEV expander does not reuse the existing `add nsw`
// instruction for the expansion.
TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  br i1 false, label %exit, label %body
   * body:
   *  %s1 = add i64 %a, -1
   *  br label %exit
   * exit:
   *  %s = add nsw i64 %a, -1
   *  ret void
   */

  // Create a module.
  Module M("SCEVExpanderNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

// Check that SCEV does not cache a SCEV -> V mapping when V differs from
// the SCEV in its NUW flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nuw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVCacheNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to put it in the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expansion should use S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that SCEV does not cache a SCEV -> V mapping when V differs from
// the SCEV in its NSW flag.
TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *  %s1 = add i64 %a, -1
   *  %s2 = add nsw i64 %a, -1
   *  br label %exit
   * exit:
   *  ret void
   */

  // Create a module.
  Module M("SCEVCacheNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Get S2 first to put it in the cache.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now get S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expansion should use S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

TEST_F(ScalarEvolutionsTest, SCEVExpandInsertCanonicalIV) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
  // SCEVExpander will insert one.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            " br label %loop "
            "loop: "
            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            " %i.inc = add nsw i32 %i, 1 "
            " %cont = icmp slt i32 %i.inc, %limit "
            " br i1 %cont, label %loop, label %exit "
            "exit: "
            " ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto *Loop = LI.getLoopFor(I.getParent());
              EXPECT_FALSE(Loop->getCanonicalInductionVariable());

              auto *AR = GetAddRec(SE, Loop);
              unsigned ExpectedCanonicalIVWidth =
                  SE.getTypeSizeInBits(AR->getType());

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);
              PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
              EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
            });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  // SCEVExpander will insert a canonical IV of a wider type to expand the
  // addrec.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEV *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        " br label %loop "
        "loop: "
        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        " %i.inc = add nsw i32 %i, 1 "
        " %canonical.iv.inc = add i8 %canonical.iv, 1 "
        " %cont = icmp slt i32 %i.inc, %limit "
        " br i1 %cont, label %loop, label %exit "
        "exit: "
        " ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking for the new canonical indvar.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == CanonicalIV)
          continue;
        // We expect that the only PHI added is the new canonical IV.
        EXPECT_FALSE(NewCanonicalIV);
        NewCanonicalIV = PN;
      }

      // Check that NewCanonicalIV is a canonical IV, i.e. {0,+,1}.
      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
      EXPECT_TRUE(isa<ConstantInt>(Start));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
      EXPECT_TRUE(isa<BinaryOperator>(Next));
      auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
      auto *Step = NextBinOp->getOperand(1);
      EXPECT_TRUE(isa<ConstantInt>(Step));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());

      unsigned NewCanonicalIVBitWidth =
          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  // To expand the addrec SCEVExpander should use the existing canonical IV.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            " br label %loop "
            "loop: "
            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            " %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                " %i.inc = add nsw i32 %i, 1 "
                " %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                " %cont = icmp slt i32 %i.inc, %limit "
                " br i1 %cont, label %loop, label %exit "
                "exit: "
                " ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);

              // Loop over all of the PHI nodes, checking whether a new
              // canonical indvar was introduced.
              PHINode *NewCanonicalIV = nullptr;
              for (BasicBlock::iterator i = LoopHeaderBB->begin();
                   isa<PHINode>(i); ++i) {
                PHINode *PN = cast<PHINode>(i);
                if (PN == &I || PN == &CanonicalIV)
                  continue;
                NewCanonicalIV = PN;
              }
              EXPECT_FALSE(NewCanonicalIV);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1}
  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
  };
  TestNoCanonicalIV(GetAR2);
  TestNarrowCanonicalIV(GetAR2);
  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
}

TEST_F(ScalarEvolutionsTest, SCEVExpanderShlNSW) {

  auto checkOneCase = [this](std::string &&str) {
    LLVMContext C;
    SMDiagnostic Err;
    std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    Function *F = M->getFunction("f");
    ASSERT_NE(F, nullptr) << "Could not find function 'f'";

    BasicBlock &Entry = F->getEntryBlock();
    LoadInst *Load = cast<LoadInst>(&Entry.front());
    BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());

    ScalarEvolution SE = buildSE(*F);
    const SCEV *AndSCEV = SE.getSCEV(And);
    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());

    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
    auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
    EXPECT_EQ(I->getOpcode(), Instruction::Shl);
    EXPECT_FALSE(I->hasNoSignedWrap());
  };

  checkOneCase("define void @f(i16* %arrayidx) { "
               " %1 = load i16, i16* %arrayidx "
               " %2 = and i16 %1, -32768 "
               " ret void "
               "} ");

  checkOneCase("define void @f(i8* %arrayidx) { "
               " %1 = load i8, i8* %arrayidx "
               " %2 = and i8 %1, -128 "
               " ret void "
               "} ");
}

// Test expansion of nested addrecs in CanonicalMode.
// Expanding nested addrecs in canonical mode requires a canonical IV of a
// type wider than the type of the addrec itself. Currently, SCEVExpander
// just falls back to literal mode for nested addrecs.
TEST_F(ScalarEvolutionsTest, SCEVExpandNonAffineAddRec) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical IV.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            " br label %loop "
            "loop: "
            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            " %i.inc = add nsw i32 %i, 1 "
            " %cont = icmp slt i32 %i.inc, %limit "
            " br i1 %cont, label %loop, label %exit "
            "exit: "
            " ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(*M, "test",
                  [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
                    auto &I = GetInstByName(F, "i");
                    auto *Loop = LI.getLoopFor(I.getParent());
                    EXPECT_FALSE(Loop->getCanonicalInductionVariable());

                    auto *AR = GetAddRec(SE, Loop);
                    EXPECT_FALSE(AR->isAffine());

                    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
                    auto *InsertAt = I.getNextNode();
                    Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
                    auto *ExpandedAR = SE.getSCEV(V);
                    // Check that the expansion happened literally.
                    EXPECT_EQ(AR, ExpandedAR);
                  });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        " br label %loop "
        "loop: "
        " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        " %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        " %i.inc = add nsw i32 %i, 1 "
        " %canonical.iv.inc = add i8 %canonical.iv, 1 "
        " %cont = icmp slt i32 %i.inc, %limit "
        " br i1 %cont, label %loop, label %exit "
        "exit: "
        " ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            " br label %loop "
            "loop: "
            " %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            " %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                " %i.inc = add nsw i32 %i, 1 "
                " %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                " %cont = icmp slt i32 %i.inc, %limit "
                " br i1 %cont, label %loop, label %exit "
                "exit: "
                " ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_FALSE(AR->isAffine());
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
              auto *ExpandedAR = SE.getSCEV(V);
              // Check that the expansion happened literally.
              EXPECT_EQ(AR, ExpandedAR);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1,+,1}
  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR3);
  TestNarrowCanonicalIV(GetAR3);
  TestMatchingCanonicalIV(GetAR3, ARBitWidth);

  // Expand {5,+,1,+,1,+,1}
  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR4);
  TestNarrowCanonicalIV(GetAR4);
  TestMatchingCanonicalIV(GetAR4, ARBitWidth);

  // Expand {5,+,1,+,1,+,1,+,1}
  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR5);
  TestNarrowCanonicalIV(GetAR5);
  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
}

} // end namespace llvm