0f55045526
This patch pulls google/benchmark v1.4.1 into the LLVM tree so that any project could use it for benchmark generation. A dummy benchmark is added to `llvm/benchmarks/DummyYAML.cpp` to validate the correctness of the build process. The current version does not utilize LLVM LNT and LLVM CMake infrastructure, but that might be sufficient for most users.

Two introduced CMake variables:
* `LLVM_INCLUDE_BENCHMARKS` (`ON` by default) generates benchmark targets
* `LLVM_BUILD_BENCHMARKS` (`OFF` by default) adds generated benchmark targets to the list of default LLVM targets (i.e. if `ON`, benchmarks will be built upon standard build invocation, e.g. `ninja` or `make` with no specific targets)

List of modifications:
* `BENCHMARK_ENABLE_TESTING` is disabled
* `BENCHMARK_ENABLE_EXCEPTIONS` is disabled
* `BENCHMARK_ENABLE_INSTALL` is disabled
* `BENCHMARK_ENABLE_GTEST_TESTS` is disabled
* `BENCHMARK_DOWNLOAD_DEPENDENCIES` is disabled

Original discussion can be found here:
http://lists.llvm.org/pipermail/llvm-dev/2018-August/125023.html

Reviewed by: dberris, lebedev.ri

Subscribers: ilya-biryukov, ioeric, EricWF, lebedev.ri, srhines, dschuff, mgorny, krytarowski, fedor.sergeev, mgrang, jfb, llvm-commits

Differential Revision: https://reviews.llvm.org/D50894

llvm-svn: 340809
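For illustration only, here is a minimal sketch of what a standalone benchmark built against the vendored library can look like; the function name BM_StringCopy and its workload are placeholders, not the actual contents of `llvm/benchmarks/DummyYAML.cpp`:

#include "benchmark/benchmark.h"
#include <string>

// Hypothetical workload; any cheap operation suffices to validate the build.
static void BM_StringCopy(benchmark::State& state) {
  std::string src(64, 'x');
  for (auto _ : state) {
    std::string copy(src);           // work measured on each iteration
    benchmark::DoNotOptimize(copy);  // keep the copy from being optimized away
  }
}
BENCHMARK(BM_StringCopy);

BENCHMARK_MAIN();

With the variables introduced by this patch, `-DLLVM_INCLUDE_BENCHMARKS=ON` (the default) generates such benchmark targets, and `-DLLVM_BUILD_BENCHMARKS=ON` additionally builds them during a plain `ninja` or `make` invocation.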
137 lines | 3.6 KiB | C++
#include "benchmark/benchmark.h"

#include <cassert>
#include <iterator>
#include <type_traits>

#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)

void BM_empty(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();

void BM_spin_empty(benchmark::State& state) {
  for (auto _ : state) {
    for (int x = 0; x < state.range(0); ++x) {
      benchmark::DoNotOptimize(x);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();

void BM_spin_pause_before(benchmark::State& state) {
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();

// One spin loop runs with the timer paused, a second runs with it resumed.
void BM_spin_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
    state.ResumeTiming();
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();

void BM_pause_during(benchmark::State& state) {
  for (auto _ : state) {
    state.PauseTiming();
    state.ResumeTiming();
  }
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();

void BM_spin_pause_after(benchmark::State& state) {
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();

void BM_spin_pause_before_and_after(benchmark::State& state) {
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
  for (auto _ : state) {
    for (int i = 0; i < state.range(0); ++i) {
      benchmark::DoNotOptimize(i);
    }
  }
  for (int i = 0; i < state.range(0); ++i) {
    benchmark::DoNotOptimize(i);
  }
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();

void BM_empty_stop_start(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

// The iteration count observed via KeepRunning() must match state.iterations().
void BM_KeepRunning(benchmark::State& state) {
  size_t iter_count = 0;
  assert(iter_count == state.iterations());
  while (state.KeepRunning()) {
    ++iter_count;
  }
  assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

void BM_KeepRunningBatch(benchmark::State& state) {
  // Choose a prime batch size to avoid evenly dividing max_iterations.
  const size_t batch_size = 101;
  size_t iter_count = 0;
  while (state.KeepRunningBatch(batch_size)) {
    iter_count += batch_size;
  }
  assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);

// The range-based for loop must execute exactly state.max_iterations times.
void BM_RangedFor(benchmark::State& state) {
  size_t iter_count = 0;
  for (auto _ : state) {
    ++iter_count;
  }
  assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
    typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
    typename benchmark::State::StateIterator::value_type>::value, "");

BENCHMARK_MAIN();