mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 20:23:11 +01:00
c40a5a9f25
Summary: These changes are to allow to a Result object to have nested Result objects in order to support microbenchmarks. Currently lit is restricted to reporting one result object for one test, this change provides support tests that want to report individual timings for individual kernels. This revision is the result of the discussions in https://reviews.llvm.org/D32272#794759, https://reviews.llvm.org/D37421#f8003b27 and https://reviews.llvm.org/D38496. It is a separation of the changes purposed in https://reviews.llvm.org/D40077. This change will enable adding LCALS (Livermore Compiler Analysis Loop Suite) collection of loop kernels to the llvm test suite using the google benchmark library (https://reviews.llvm.org/D43319) with tracking of individual kernel timings. Previously microbenchmarks had been handled by using macros to section groups of microbenchmarks together and build many executables while still getting a grouped timing (MultiSource/TSVC). Recently the google benchmark library was added to the test suite and utilized with a litsupport plugin. However the limitation of 1 test 1 result limited its use to passing a runtime option to run only 1 microbenchmark with several hand written tests (MicroBenchmarks/XRay). This runs the same executable many times with different hand-written tests. I will update the litsupport plugin to utilize the new functionality (https://reviews.llvm.org/D43316). These changes allow lit to report micro test results if desired in order to get many precise timing results from 1 run of 1 test executable. Reviewers: MatzeB, hfinkel, rengolin, delcypher Differential Revision: https://reviews.llvm.org/D43314 llvm-svn: 327422
52 lines
1.6 KiB
Python
# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
# RUN: FileCheck < %t.results.out %s
# RUN: rm %t.results.out

# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }