mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 04:02:41 +01:00
0f55045526
This patch pulls google/benchmark v1.4.1 into the LLVM tree so that any project can use it for benchmark generation. A dummy benchmark is added in `llvm/benchmarks/DummyYAML.cpp` to validate that the build process is correct. The current version does not use the LLVM LNT and LLVM CMake infrastructure, but it might still be sufficient for most users.

Two CMake variables are introduced:

* `LLVM_INCLUDE_BENCHMARKS` (`ON` by default) generates benchmark targets.
* `LLVM_BUILD_BENCHMARKS` (`OFF` by default) adds the generated benchmark targets to the list of default LLVM targets (i.e. if `ON`, benchmarks are built by a standard build invocation such as `ninja` or `make` with no explicit targets).

List of modifications to the bundled library:

* `BENCHMARK_ENABLE_TESTING` is disabled
* `BENCHMARK_ENABLE_EXCEPTIONS` is disabled
* `BENCHMARK_ENABLE_INSTALL` is disabled
* `BENCHMARK_ENABLE_GTEST_TESTS` is disabled
* `BENCHMARK_DOWNLOAD_DEPENDENCIES` is disabled

Original discussion: http://lists.llvm.org/pipermail/llvm-dev/2018-August/125023.html

Reviewed by: dberris, lebedev.ri

Subscribers: ilya-biryukov, ioeric, EricWF, lebedev.ri, srhines, dschuff, mgorny, krytarowski, fedor.sergeev, mgrang, jfb, llvm-commits

Differential Revision: https://reviews.llvm.org/D50894

llvm-svn: 340809
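For readers unfamiliar with google/benchmark, here is a minimal sketch of what a benchmark translation unit such as `llvm/benchmarks/DummyYAML.cpp` could look like; the benchmark name and body below are illustrative placeholders, not the actual contents of that file:

```cpp
#include "benchmark/benchmark.h"

#include <string>

// Hypothetical benchmark body; the real DummyYAML.cpp exercises LLVM's
// YAML handling instead.
static void BM_StringCopy(benchmark::State& state) {
  std::string src(64, 'x');
  for (auto _ : state) {
    // Each pass through this loop is one timed iteration.
    std::string copy = src;
    benchmark::DoNotOptimize(copy);
  }
}
// Registers the benchmark with the library's global registry.
BENCHMARK(BM_StringCopy);

// Expands to a main() that parses --benchmark_* flags and runs every
// registered benchmark.
BENCHMARK_MAIN();
```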
193 lines
6.2 KiB
C++
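// This test registers benchmarks that call State::SkipWithError() at
// different points (before the first iteration, mid-iteration, while timing
// is paused, and after the iteration loop has finished) and verifies, via a
// reporter that records every run, that each run is reported with the
// expected error flag and error message.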
#undef NDEBUG
#include <cassert>
#include <initializer_list>
#include <string>
#include <vector>

#include "../src/check.h"  // NOTE: check.h is for internal use only!
#include "benchmark/benchmark.h"

namespace {

class TestReporter : public benchmark::ConsoleReporter {
 public:
  virtual bool ReportContext(const Context& context) {
    return ConsoleReporter::ReportContext(context);
  }

  virtual void ReportRuns(const std::vector<Run>& report) {
    // Record every run so main() can check it against ExpectedResults, then
    // forward to the normal console output.
    all_runs_.insert(all_runs_.end(), begin(report), end(report));
    ConsoleReporter::ReportRuns(report);
  }

  TestReporter() {}
  virtual ~TestReporter() {}

  mutable std::vector<Run> all_runs_;
};

struct TestCase {
  std::string name;
  bool error_occurred;
  std::string error_message;

  typedef benchmark::BenchmarkReporter::Run Run;

  void CheckRun(Run const& run) const {
    CHECK(name == run.benchmark_name) << "expected " << name << " got "
                                      << run.benchmark_name;
    CHECK(error_occurred == run.error_occurred);
    CHECK(error_message == run.error_message);

    if (error_occurred) {
      // CHECK(run.iterations == 0);
    } else {
      CHECK(run.iterations != 0);
    }
  }
};

std::vector<TestCase> ExpectedResults;

int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
  for (auto TC : v) {
    TC.name = base_name + TC.name;
    ExpectedResults.push_back(std::move(TC));
  }
  return 0;
}

#define CONCAT(x, y) CONCAT2(x, y)
#define CONCAT2(x, y) x##y
#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__)

}  // end namespace

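// Each ADD_CASES(...) below expands to a dummy global whose initializer calls
// AddCases() before main() runs, recording the (suffix, error_occurred,
// error_message) expectations for the benchmark registered just above it.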
void BM_error_before_running(benchmark::State& state) {
  state.SkipWithError("error message");
  while (state.KeepRunning()) {
    assert(false);
  }
}
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});

void BM_error_before_running_batch(benchmark::State& state) {
  state.SkipWithError("error message");
  while (state.KeepRunningBatch(17)) {
    assert(false);
  }
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});

void BM_error_before_running_range_for(benchmark::State& state) {
  state.SkipWithError("error message");
  for (auto _ : state) {
    assert(false);
  }
}
BENCHMARK(BM_error_before_running_range_for);
ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}});

void BM_error_during_running(benchmark::State& state) {
  bool first_iter = true;
  while (state.KeepRunning()) {
    if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
      // For Arg(1), threads with a low index call SkipWithError() exactly
      // once; a single call from any thread is enough for the whole run to be
      // reported as failed, which is why every "/1/threads:N" case below
      // expects an error.
      assert(first_iter);
      first_iter = false;
      state.SkipWithError("error message");
    } else {
      state.PauseTiming();
      state.ResumeTiming();
    }
  }
}
BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"},
                                      {"/1/threads:2", true, "error message"},
                                      {"/1/threads:4", true, "error message"},
                                      {"/1/threads:8", true, "error message"},
                                      {"/2/threads:1", false, ""},
                                      {"/2/threads:2", false, ""},
                                      {"/2/threads:4", false, ""},
                                      {"/2/threads:8", false, ""}});

void BM_error_during_running_ranged_for(benchmark::State& state) {
  assert(state.max_iterations > 3 && "test requires at least a few iterations");
  bool first_iter = true;
  // NOTE: Users should not write the for loop explicitly.
  for (auto It = state.begin(), End = state.end(); It != End; ++It) {
    if (state.range(0) == 1) {
      assert(first_iter);
      first_iter = false;
      state.SkipWithError("error message");
      // Test the unfortunate but documented behavior that the ranged-for loop
      // doesn't automatically terminate when SkipWithError is set.
      assert(++It != End);
      break;  // Required behavior
    }
  }
}
BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5);
ADD_CASES("BM_error_during_running_ranged_for",
          {{"/1/iterations:5", true, "error message"},
           {"/2/iterations:5", false, ""}});

void BM_error_after_running(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
  if (state.thread_index <= (state.threads / 2))
    state.SkipWithError("error message");
}
BENCHMARK(BM_error_after_running)->ThreadRange(1, 8);
ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"},
                                     {"/threads:2", true, "error message"},
                                     {"/threads:4", true, "error message"},
                                     {"/threads:8", true, "error message"}});

void BM_error_while_paused(benchmark::State& state) {
  bool first_iter = true;
  while (state.KeepRunning()) {
    if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) {
      assert(first_iter);
      first_iter = false;
      // Report the error while timing is paused; the run must still be
      // flagged as failed.
      state.PauseTiming();
      state.SkipWithError("error message");
    } else {
      state.PauseTiming();
      state.ResumeTiming();
    }
  }
}
BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8);
ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"},
                                    {"/1/threads:2", true, "error message"},
                                    {"/1/threads:4", true, "error message"},
                                    {"/1/threads:8", true, "error message"},
                                    {"/2/threads:1", false, ""},
                                    {"/2/threads:2", false, ""},
                                    {"/2/threads:4", false, ""},
                                    {"/2/threads:8", false, ""}});

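// main() runs every registered benchmark with the recording reporter and then
// checks each reported run against the corresponding expected TestCase, in
// registration order.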
int main(int argc, char* argv[]) {
  benchmark::Initialize(&argc, argv);

  TestReporter test_reporter;
  benchmark::RunSpecifiedBenchmarks(&test_reporter);

  typedef benchmark::BenchmarkReporter::Run Run;
  auto EB = ExpectedResults.begin();

  for (Run const& run : test_reporter.all_runs_) {
    assert(EB != ExpectedResults.end());
    EB->CheckRun(run);
    ++EB;
  }
  assert(EB == ExpectedResults.end());

  return 0;
}