//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1 | OpNewLike, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5,
  MallocOrCallocLike = MallocLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
};

// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
  {LibFunc_malloc, {MallocLike, 1, 0, -1}},
  {LibFunc_vec_malloc, {MallocLike, 1, 0, -1}},
  {LibFunc_valloc, {MallocLike, 1, 0, -1}},
  {LibFunc_Znwj, {OpNewLike, 1, 0, -1}}, // new(unsigned int)
  {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new(unsigned int, nothrow)
  {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new(unsigned int, align_val_t)
  {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, // new(unsigned int, align_val_t, nothrow)
   {MallocLike, 3, 0, -1}},
  {LibFunc_Znwm, {OpNewLike, 1, 0, -1}}, // new(unsigned long)
  {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new(unsigned long, nothrow)
  {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new(unsigned long, align_val_t)
  {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, // new(unsigned long, align_val_t, nothrow)
   {MallocLike, 3, 0, -1}},
  {LibFunc_Znaj, {OpNewLike, 1, 0, -1}}, // new[](unsigned int)
  {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new[](unsigned int, nothrow)
  {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new[](unsigned int, align_val_t)
  {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, // new[](unsigned int, align_val_t, nothrow)
   {MallocLike, 3, 0, -1}},
  {LibFunc_Znam, {OpNewLike, 1, 0, -1}}, // new[](unsigned long)
  {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1}}, // new[](unsigned long, nothrow)
  {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1}}, // new[](unsigned long, align_val_t)
  {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, // new[](unsigned long, align_val_t, nothrow)
   {MallocLike, 3, 0, -1}},
  {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1}}, // new(unsigned int)
  {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1}}, // new(unsigned int, nothrow)
  {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1}}, // new(unsigned long long)
  {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1}}, // new(unsigned long long, nothrow)
  {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1}}, // new[](unsigned int)
  {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1}}, // new[](unsigned int, nothrow)
  {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1}}, // new[](unsigned long long)
  {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1}}, // new[](unsigned long long, nothrow)
  {LibFunc_aligned_alloc, {AlignedAllocLike, 2, 1, -1}},
  {LibFunc_memalign, {AlignedAllocLike, 2, 1, -1}},
  {LibFunc_calloc, {CallocLike, 2, 0, 1}},
  {LibFunc_vec_calloc, {CallocLike, 2, 0, 1}},
  {LibFunc_realloc, {ReallocLike, 2, 1, -1}},
  {LibFunc_vec_realloc, {ReallocLike, 2, 1, -1}},
  {LibFunc_reallocf, {ReallocLike, 2, 1, -1}},
  {LibFunc_strdup, {StrDupLike, 1, -1, -1}},
  {LibFunc_strndup, {StrDupLike, 2, 1, -1}}
  // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
};
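
// Reading the table above (an illustrative note, not consumed by the code):
// the calloc entry
//   {LibFunc_calloc, {CallocLike, 2, 0, 1}}
// records that calloc takes 2 parameters and that its allocation size is
// derived from parameters 0 and 1 (their product), matching
// calloc(nmemb, size). The strdup entry uses {1, -1, -1} because the
// allocated size comes from the string contents rather than from any
// integer parameter.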

static const Function *getCalledFunction(const Value *V,
                                         bool LookThroughBitCast,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  if (LookThroughBitCast)
    V = V->stripPointerCasts();

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's either a call to a
/// known allocation function, or a call to a function with the allocsize
/// attribute.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  StringRef FnName = Callee->getName();
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}
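
// Illustrative example (not exercised by this file directly): the prototype
// check above accepts a malloc declared in IR as
//   declare i8* @malloc(i64)
// (i8* result, one i32 or i64 size parameter), but rejects an unrelated
//   declare i32 @malloc(i32)
// so a user function that merely shares the name is never treated as an
// allocator.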

static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI,
                                              bool LookThroughBitCast = false) {
  bool IsNoBuiltinCall;
  if (const Function *Callee =
          getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
                  bool LookThroughBitCast = false) {
  bool IsNoBuiltinCall;
  if (const Function *Callee =
          getCalledFunction(V, LookThroughBitCast, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.getValueOr(-1);
  return Result;
}
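
// Illustrative example (assumes Clang's alloc_size attribute): a C declaration
//   void *my_alloc(size_t n, size_t m) __attribute__((alloc_size(1, 2)));
// is lowered to an IR function carrying allocsize(0, 1) (IR indices are
// 0-based), so getAllocationSize reports it as MallocLike with FstParam = 0
// and SndParam = 1.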

static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
  const auto *CB =
      dyn_cast<CallBase>(LookThroughBitCast ? V->stripPointerCasts() : V);
  return CB && CB->hasRetAttr(Attribute::NoAlias);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast).hasValue();
}
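
// A minimal usage sketch (hypothetical caller, not part of LLVM):
//
//   static bool isHeapPointerSource(const Value *V,
//                                   const TargetLibraryInfo *TLI) {
//     // Any recognized allocation call produces a fresh heap pointer.
//     return isAllocationFn(V, TLI);
//   }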

bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, AnyAlloc, GetTLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
                       bool LookThroughBitCast) {
  // It's safe to consider realloc as noalias since accessing the original
  // pointer is undefined behavior.
  return isAllocationFn(V, TLI, LookThroughBitCast) ||
         hasNoAliasAttr(V, LookThroughBitCast);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, MallocLike, TLI, LookThroughBitCast).hasValue();
}

bool llvm::isMallocLikeFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, MallocLike, GetTLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
bool llvm::isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                                bool LookThroughBitCast) {
  return getAllocationData(V, AlignedAllocLike, TLI, LookThroughBitCast)
      .hasValue();
}

bool llvm::isAlignedAllocLikeFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    bool LookThroughBitCast) {
  return getAllocationData(V, AlignedAllocLike, GetTLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, CallocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                                  bool LookThroughBitCast) {
  return getAllocationData(V, MallocOrCallocLike, TLI, LookThroughBitCast)
      .hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                         bool LookThroughBitCast) {
  return getAllocationData(V, AllocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                           bool LookThroughBitCast) {
  return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory and throws if an allocation failed (e.g., new).
bool llvm::isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                         bool LookThroughBitCast) {
  return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast).hasValue();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (strdup, strndup).
bool llvm::isStrdupLikeFn(const Value *V, const TargetLibraryInfo *TLI,
                          bool LookThroughBitCast) {
  return getAllocationData(V, StrDupLike, TLI, LookThroughBitCast).hasValue();
}

/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(
    const Value *I,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return isMallocLikeFn(I, GetTLI) ? dyn_cast<CallInst>(I) : nullptr;
}

static Value *computeArraySize(const CallInst *CI, const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               bool LookThroughSExt = false) {
  if (!CI)
    return nullptr;

  // The size of the malloc's result type must be known to determine array
  // size.
  Type *T = getMallocAllocatedType(CI, TLI);
  if (!T || !T->isSized())
    return nullptr;

  unsigned ElementSize = DL.getTypeAllocSize(T);
  if (StructType *ST = dyn_cast<StructType>(T))
    ElementSize = DL.getStructLayout(ST)->getSizeInBytes();

  // If malloc call's arg can be determined to be a multiple of ElementSize,
  // return the multiple. Otherwise, return NULL.
  Value *MallocArg = CI->getArgOperand(0);
  Value *Multiple = nullptr;
  if (ComputeMultiple(MallocArg, ElementSize, Multiple, LookThroughSExt))
    return Multiple;

  return nullptr;
}
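
// Worked example (illustrative): if the malloc result is only used through a
// bitcast to a pointer to a 12-byte struct and the malloc argument is a
// constant 36, computeArraySize returns a constant 3; if the argument cannot
// be shown to be a multiple of 12, it returns null.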

/// getMallocType - Returns the PointerType resulting from the malloc call.
/// The PointerType depends on the number of bitcast uses of the malloc call:
///   0: PointerType is the calls' return type.
///   1: PointerType is the bitcast's result type.
///  >1: Unique PointerType cannot be determined, return NULL.
PointerType *llvm::getMallocType(const CallInst *CI,
                                 const TargetLibraryInfo *TLI) {
  assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");

  PointerType *MallocType = nullptr;
  unsigned NumOfBitCastUses = 0;

  // Determine if CallInst has a bitcast use.
  for (const User *U : CI->users())
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      MallocType = cast<PointerType>(BCI->getDestTy());
      NumOfBitCastUses++;
    }

  // Malloc call has 1 bitcast use, so type is the bitcast's destination type.
  if (NumOfBitCastUses == 1)
    return MallocType;

  // Malloc call was not bitcast, so type is the malloc function's return type.
  if (NumOfBitCastUses == 0)
    return cast<PointerType>(CI->getType());

  // Type could not be determined.
  return nullptr;
}
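
// Illustrative example (typed-pointer IR):
//   %call = call i8* @malloc(i64 16)
//   %typed = bitcast i8* %call to i32*
// The single bitcast use makes getMallocType return i32*; with no bitcast
// uses it returns i8*, and with multiple bitcast uses it returns null.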

/// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call:
///   0: PointerType is the malloc calls' return type.
///   1: PointerType is the bitcast's result type.
///  >1: Unique PointerType cannot be determined, return NULL.
Type *llvm::getMallocAllocatedType(const CallInst *CI,
                                   const TargetLibraryInfo *TLI) {
  PointerType *PT = getMallocType(CI, TLI);
  return PT ? PT->getElementType() : nullptr;
}

/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL,
                                const TargetLibraryInfo *TLI,
                                bool LookThroughSExt) {
  assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
  return computeArraySize(CI, DL, TLI, LookThroughSExt);
}

/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I,
                                        const TargetLibraryInfo *TLI) {
  return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
}

/// isLibFreeFunction - Returns true if the function is a builtin free().
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  unsigned ExpectedNumParams;
  if (TLIFn == LibFunc_free ||
      TLIFn == LibFunc_ZdlPv || // operator delete(void*)
      TLIFn == LibFunc_ZdaPv || // operator delete[](void*)
      TLIFn == LibFunc_msvc_delete_ptr32 || // operator delete(void*)
      TLIFn == LibFunc_msvc_delete_ptr64 || // operator delete(void*)
      TLIFn == LibFunc_msvc_delete_array_ptr32 || // operator delete[](void*)
      TLIFn == LibFunc_msvc_delete_array_ptr64)   // operator delete[](void*)
    ExpectedNumParams = 1;
  else if (TLIFn == LibFunc_ZdlPvj || // delete(void*, uint)
           TLIFn == LibFunc_ZdlPvm || // delete(void*, ulong)
           TLIFn == LibFunc_ZdlPvRKSt9nothrow_t || // delete(void*, nothrow)
           TLIFn == LibFunc_ZdlPvSt11align_val_t || // delete(void*, align_val_t)
           TLIFn == LibFunc_ZdaPvj || // delete[](void*, uint)
           TLIFn == LibFunc_ZdaPvm || // delete[](void*, ulong)
           TLIFn == LibFunc_ZdaPvRKSt9nothrow_t || // delete[](void*, nothrow)
           TLIFn == LibFunc_ZdaPvSt11align_val_t || // delete[](void*, align_val_t)
           TLIFn == LibFunc_msvc_delete_ptr32_int || // delete(void*, uint)
           TLIFn == LibFunc_msvc_delete_ptr64_longlong || // delete(void*, ulonglong)
           TLIFn == LibFunc_msvc_delete_ptr32_nothrow || // delete(void*, nothrow)
           TLIFn == LibFunc_msvc_delete_ptr64_nothrow || // delete(void*, nothrow)
           TLIFn == LibFunc_msvc_delete_array_ptr32_int || // delete[](void*, uint)
           TLIFn == LibFunc_msvc_delete_array_ptr64_longlong || // delete[](void*, ulonglong)
           TLIFn == LibFunc_msvc_delete_array_ptr32_nothrow || // delete[](void*, nothrow)
           TLIFn == LibFunc_msvc_delete_array_ptr64_nothrow)   // delete[](void*, nothrow)
    ExpectedNumParams = 2;
  else if (TLIFn == LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t || // delete[](void*, align_val_t, nothrow)
           TLIFn == LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t || // delete(void*, align_val_t, nothrow)
           TLIFn == LibFunc_ZdlPvjSt11align_val_t || // delete(void*, unsigned int, align_val_t)
           TLIFn == LibFunc_ZdlPvmSt11align_val_t || // delete(void*, unsigned long, align_val_t)
           TLIFn == LibFunc_ZdaPvjSt11align_val_t || // delete[](void*, unsigned int, align_val_t)
           TLIFn == LibFunc_ZdaPvmSt11align_val_t)   // delete[](void*, unsigned long, align_val_t)
    ExpectedNumParams = 3;
  else
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != ExpectedNumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}
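
// Demangling cheat sheet (illustrative, Itanium C++ ABI): the symbol _ZdlPv
// is "operator delete(void*)" and _ZdaPvmSt11align_val_t is
// "operator delete[](void*, unsigned long, std::align_val_t)"; the LibFunc
// enumerators above name these symbols without the leading underscore.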

/// isFreeCall - Returns non-null if the value is a call to the builtin free().
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(I, /*LookThroughBitCast=*/false, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  StringRef FnName = Callee->getName();
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}
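
// Worked example (illustrative): for an 8-byte object accessed at offset 6,
// getSizeWithOverflow returns 8 - 6 = 2 remaining bytes; for offset 10 (past
// the end) or a negative offset it clamps the result to 0.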

/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}
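
// A minimal usage sketch (hypothetical caller; default-constructed options
// give Mode::Exact):
//
//   uint64_t Size;
//   ObjectSizeOpts Opts;
//   if (getObjectSize(Ptr, Size, DL, TLI, Opts))
//     errs() << "object is " << Size << " bytes\n";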
|
|
|
|
|
2019-01-30 21:34:35 +01:00
|
|
|
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
|
|
|
|
const DataLayout &DL,
|
|
|
|
const TargetLibraryInfo *TLI,
|
|
|
|
bool MustSucceed) {
|
2016-12-21 00:46:36 +01:00
|
|
|
assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
|
|
|
|
"ObjectSize must be a call to llvm.objectsize!");
|
|
|
|
|
|
|
|
bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we're pointing past the end of the object, then we can always
      // access exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}
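
// Example (illustrative sketch, not code from this file): a simplification
// pass visiting an llvm.objectsize call `II` could fold it like this, leaving
// the intrinsic alone when nothing is known:
//
//   if (Value *Folded = lowerObjectSizeCall(II, DL, TLI,
//                                           /*MustSucceed=*/false)) {
//     II->replaceAllUsesWith(Folded);
//     II->eraseFromParent();
//   }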

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align(Alignment)));
  return Size;
}
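
// For instance, with Options.RoundToAlign set, a 10-byte object with 8-byte
// alignment reports a size of 16, since alignTo(10, Align(8)) rounds 10 up to
// the next multiple of 8.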

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getNullValue(IntTyBits);

  V = V->stripPointerCasts();
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      return visitGEPOperator(*GEP);
    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::IntToPtr)
      return unknown(); // clueless
    if (CE->getOpcode() == Instruction::GetElementPtr)
      return visitGEPOperator(cast<GEPOperator>(*CE));
  }

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}
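
// For example, with IntTyBits == 32: a 64-bit APInt holding 42 has 6 active
// bits and is truncated to 32 bits successfully, while a 64-bit APInt holding
// 1ULL << 40 has 41 active bits, so the function reports failure.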

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  if (isa<ScalableVectorType>(I.getAllocatedType()))
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlignment()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlignment()), Zero);
  }
  return unknown();
}
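
// For example, `%a = alloca [10 x i8], align 16` yields (16, 0) when
// Options.RoundToAlign is set (10 bytes rounded up to the 16-byte alignment),
// while `%a = alloca i32, i32 %n` with a non-constant %n yields unknown().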

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}
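
// For example, a byval argument such as `[12 x i8]* byval([12 x i8]) %p` has
// an in-memory type of [12 x i8] and yields (12, 0); a plain pointer argument
// carries no in-memory type, so it is counted in ObjectVisitorArgument and
// yields unknown().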

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(CB.getArgOperand(0)));
    if (!Size)
      return unknown();

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      ConstantInt *Arg =
          dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
      if (!Arg)
        return unknown();

      APInt MaxSize = Arg->getValue().zextOrSelf(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return std::make_pair(Size, Zero);
  }

  ConstantInt *Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->FstParam));
  if (!Arg)
    return unknown();

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size))
    return unknown();

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return std::make_pair(Size, Zero);

  Arg = dyn_cast<ConstantInt>(CB.getArgOperand(FnData->SndParam));
  if (!Arg)
    return unknown();

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems))
    return unknown();

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  return Overflow ? unknown() : std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - memset
}
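
// For example, `malloc(i64 40)` yields (40, 0) and `calloc(i64 4, i64 8)`
// yields (32, 0), while `malloc(i64 %n)` with a non-constant %n yields
// unknown(); this visitor only folds constants and never emits IR.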

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets cast to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetType PtrData = compute(GEP.getPointerOperand());
  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()),
               0);
  if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
    return unknown();

  return std::make_pair(PtrData.first, PtrData.second + Offset);
}
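
// For example, if %p points at a 40-byte object at offset 0, then
// `getelementptr i8, i8* %p, i64 8` yields (40, 8): the size is inherited and
// the constant byte offset accumulates; a GEP with a non-constant index
// yields unknown().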

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlignment()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
  ++ObjectVisitorLoad;
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) {
  // Too complex to analyze statically.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  SizeOffsetType TrueSide = compute(I.getTrueValue());
  SizeOffsetType FalseSide = compute(I.getFalseValue());
  if (bothKnown(TrueSide) && bothKnown(FalseSide)) {
    if (TrueSide == FalseSide)
      return TrueSide;

    APInt TrueResult = getSizeWithOverflow(TrueSide);
    APInt FalseResult = getSizeWithOverflow(FalseSide);

    if (TrueResult == FalseResult)
      return TrueSide;

    if (Options.EvalMode == ObjectSizeOpts::Mode::Min)
      return TrueResult.slt(FalseResult) ? TrueSide : FalseSide;
    if (Options.EvalMode == ObjectSizeOpts::Mode::Max)
      return TrueResult.sgt(FalseResult) ? TrueSide : FalseSide;
  }
  return unknown();
}
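
// For example, selecting between a 16-byte and a 32-byte buffer (both at
// offset 0) yields the 16-byte side in Mode::Min and the 32-byte side in
// Mode::Max; in Mode::Exact the mismatch falls through to unknown().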

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // Non-computable results can be safely cached.
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) || isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // Must be a VLA.
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer size.
  // Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(), DL.getIntPtrType(I.getContext()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}
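
// For example, for `%buf = alloca i32, i32 %n`, the size expression emitted
// here multiplies the element size (4 for i32) by %n, zext/trunc'd to the
// pointer-width integer type, and is returned with a zero offset.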

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup-like functions.
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);

  // TODO: handle more standard functions (+ wchar cousins):
  // - strdup / strndup
  // - strcpy / strncpy
  // - strcat / strncat
  // - memcpy / memmove
  // - memset
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(UndefValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(UndefValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i));
    OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i));
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first,
                                     FalseSide.first);
  Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second,
                                       FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}