mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-25 12:12:47 +01:00
6702f17f53
I am planning to use this tool to find too noisy (missed) optimization remarks. Long term it may actually be better to just have another tool that exports the remarks into an SQLite database and performs queries like this in SQL. This splits out the YAML parsing from opt-viewer.py into a new Python module optrecord.py. This is the result of the script on the LLVM testsuite: Total number of remarks 714433 Top 10 remarks by pass: inline 52% gvn 24% licm 13% loop-vectorize 5% asm-printer 3% loop-unroll 1% regalloc 1% inline-cost 0% slp-vectorizer 0% loop-delete 0% Top 10 remarks: gvn/LoadClobbered 20% inline/Inlined 19% inline/CanBeInlined 18% inline/NoDefinition 9% licm/LoadWithLoopInvariantAddressInvalidated 6% licm/Hoisted 6% asm-printer/InstructionCount 3% inline/TooCostly 3% gvn/LoadElim 3% loop-vectorize/MissedDetails 2% Besides some refactoring, I also changed optrecord not to use context to access global data (max_hotness). Because of the separate module this would have required splitting context into two. However, it's not possible to access the optrecord context from the SourceFileRenderer when calling back to Remark.RelativeHotness. llvm-svn: 296682
57 lines
1.7 KiB
Python
Executable File
57 lines
1.7 KiB
Python
Executable File
#!/usr/bin/env python2.7
|
|
|
|
from __future__ import print_function
|
|
|
|
# Help text for the ArgumentParser below; shown to the user on --help.
# Fixed a grammatical error in the original ("The tools requires").
desc = '''Generate statistics about optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.

The tool requires PyYAML and Pygments Python packages.'''
|
|
|
|
import argparse
import operator
import sys
from collections import defaultdict
from multiprocessing import cpu_count, Pool

import optrecord
|
|
|
|
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('yaml_files', nargs='+')
    parser.add_argument(
        '--jobs',
        '-j',
        default=cpu_count(),
        type=int,
        help='Max job count (defaults to current CPU count)')
    args = parser.parse_args()

    # nargs='+' already makes argparse reject an empty file list, but keep
    # the defensive check.  NOTE: the original called sys.exit(1) without
    # ever importing sys, which would raise NameError on this path; the
    # import is now at the top of the file.
    if len(args.yaml_files) == 0:
        parser.print_help()
        sys.exit(1)

    # With -j1 parse serially (easier to debug); otherwise fan the YAML
    # parsing out over a process pool.
    if args.jobs == 1:
        pmap = map
    else:
        pool = Pool(processes=args.jobs)
        pmap = pool.map

    # file_remarks is unused here; only the aggregate remark dict matters.
    all_remarks, file_remarks, _ = optrecord.gather_results(pmap, args.yaml_files)

    # Histogram remark counts, keyed by pass name and by "pass/name" pair.
    bypass = defaultdict(int)
    byname = defaultdict(int)
    # .values() instead of the Py2-only .itervalues(): identical iteration
    # result on Python 2 and also works on Python 3.
    for r in all_remarks.values():
        bypass[r.Pass] += 1
        byname[r.Pass + "/" + r.Name] += 1

    total = len(all_remarks)
    print("{:24s} {:10d}\n".format("Total number of remarks", total))

    # Print the ten most frequent passes as a percentage of all remarks.
    print("Top 10 remarks by pass:")
    for (passname, count) in sorted(bypass.items(), key=operator.itemgetter(1),
                                    reverse=True)[:10]:
        print(" {:30s} {:2.0f}%".format(passname, count * 100. / total))

    # Same breakdown, but at pass/remark-name granularity.
    print("\nTop 10 remarks:")
    for (name, count) in sorted(byname.items(), key=operator.itemgetter(1),
                                reverse=True)[:10]:
        print(" {:30s} {:2.0f}%".format(name, count * 100. / total))
|