Flutter Engine
The Flutter Engine
displaylist_benchmark_parser.py
Go to the documentation of this file.
1#!/usr/bin/env python3
2#
3# Copyright 2013 The Flutter Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6
7import argparse
8import csv
9import json
10import sys
11import matplotlib.pyplot as plt # pylint: disable=import-error
12from matplotlib.backends.backend_pdf import PdfPages as pdfp # pylint: disable=import-error
13
14
class BenchmarkResult:  # pylint: disable=too-many-instance-attributes
  """Accumulates the timing series of one benchmark and renders them.

  Data points are grouped by Google Benchmark's family_index; each family
  gets a legend label. Results can be emitted as matplotlib figures
  (plot()) or as CSV columns (write_csv()).
  """

  def __init__(self, name, backend, time_unit, draw_call_count):
    self.name = name
    # family_index -> {'x': [...], 'y': [...]} of measured data points.
    self.series = {}
    # family_index -> human-readable legend label.
    self.series_labels = {}
    self.backend = backend
    # Set to True once any y value exceeds y_limit; plot() then emits an
    # extra uncropped figure in addition to the cropped one.
    self.large_y_values = False
    self.y_limit = 200
    self.time_unit = time_unit
    # -1 means the benchmark reported no DrawCallCount counter.
    self.draw_call_count = draw_call_count
    # Optional per-seed counters (VerbCount, PointCount, ...), keyed by
    # counter name and then by x value.
    # Fix: this attribute was never initialized, so add_optional_value()
    # and write_csv() raised AttributeError.
    self.optional_values = {}

  def __repr__(self):
    return 'Name: % s\nBackend: % s\nSeries: % s\nSeriesLabels: % s\n' % (
        self.name, self.backend, self.series, self.series_labels
    )

  def add_data_point(self, family, xval, yval):
    """Appends one (x, y) measurement to the given family's series."""
    if family not in self.series:
      self.series[family] = {'x': [], 'y': []}

    self.series[family]['x'].append(xval)
    self.series[family]['y'].append(yval)

    # Remember that at least one value is off the cropped-plot scale.
    if yval > self.y_limit:
      self.large_y_values = True

  def add_optional_value(self, name, xval, yval):
    """Records an optional counter value (e.g. VerbCount) for seed xval."""
    if name not in self.optional_values:
      self.optional_values[name] = {}

    self.optional_values[name][xval] = yval

  def set_family_label(self, family, label):
    """Associates a legend label with a family index."""
    # I'm not keying the main series dict off the family label
    # just in case we get data where the two aren't a 1:1 mapping
    if family in self.series_labels:
      assert self.series_labels[family] == label
      return

    self.series_labels[family] = label

  def plot(self):
    """Returns a list of matplotlib figures for this benchmark.

    Always produces one figure; when large_y_values is set the first
    figure is cropped to y_limit and a second, uncropped figure is added.
    """
    figures = []
    figures.append(plt.figure(dpi=1200, frameon=False, figsize=(11, 8.5)))

    for family in self.series:
      plt.plot(self.series[family]['x'], self.series[family]['y'], label=self.series_labels[family])

    plt.xlabel('Benchmark Seed')
    plt.ylabel('Time (' + self.time_unit + ')')

    title = ''
    # Crop the Y axis so that we can see what's going on at the lower end
    if self.large_y_values:
      plt.ylim((0, self.y_limit))
      title = self.name + ' ' + self.backend + ' (Cropped)'
    else:
      title = self.name + ' ' + self.backend

    if self.draw_call_count != -1:
      title += '\nDraw Call Count: ' + str(int(self.draw_call_count))

    plt.title(title)

    plt.grid(which='both', axis='both')

    plt.legend(fontsize='xx-small')
    plt.plot()

    if self.large_y_values:
      # Plot again but with the full Y axis visible
      figures.append(plt.figure(dpi=1200, frameon=False, figsize=(11, 8.5)))
      for family in self.series:
        plt.plot(
            self.series[family]['x'], self.series[family]['y'], label=self.series_labels[family]
        )

      plt.xlabel('Benchmark Seed')
      plt.ylabel('Time (' + self.time_unit + ')')
      title = self.name + ' ' + self.backend + ' (Complete)'

      if self.draw_call_count != -1:
        title += '\nDraw Call Count: ' + str(int(self.draw_call_count))

      plt.title(title)

      plt.grid(which='both', axis='both')

      plt.legend(fontsize='xx-small')
      plt.plot()

    return figures

  def write_csv(self, writer):
    """Writes this benchmark's series as CSV columns via *writer*."""
    # For now assume that all our series have the same x values
    # this is true for now, but may differ in the future with benchmark changes
    x_values = []
    y_values = []
    for family in self.series:
      x_values = ['x'] + self.series[family]['x']
      y_values.append([self.series_labels[family]] + self.series[family]['y'])

    # NOTE(review): this assumes each optional counter has exactly one
    # entry per x value; fewer entries would raise IndexError below.
    for name in self.optional_values:
      column = [name]
      for key in self.optional_values[name]:
        column.append(self.optional_values[name][key])
      y_values.append(column)

    writer.writerow([self.name, self.draw_call_count])
    # Emit row-major: one row per x value across all series columns.
    for line, _ in enumerate(x_values):
      row = [x_values[line]]
      for series, _ in enumerate(y_values):
        row.append(y_values[series][line])
      writer.writerow(row)
131
132
def main():
  """Entry point: parse the CLI arguments and process the benchmark JSON."""
  arg_parser = argparse.ArgumentParser()

  arg_parser.add_argument(
      'filename', action='store', help='Path to the JSON output from Google Benchmark'
  )
  arg_parser.add_argument(
      '-o',
      '--output-pdf',
      dest='output_pdf',
      action='store',
      default='output.pdf',
      help='Filename to output the PDF of graphs to.'
  )
  arg_parser.add_argument(
      '-c',
      '--output-csv',
      dest='output_csv',
      action='store',
      default='output.csv',
      help='Filename to output the CSV data to.'
  )

  options = arg_parser.parse_args()
  benchmarks = parse_json(options.filename)
  return process_benchmark_data(benchmarks, options.output_pdf, options.output_csv)
159
160
def error(message):
  """Report a fatal problem and abort.

  Prints *message* to stdout, then terminates the interpreter with
  exit status 1.
  """
  print(message)
  raise SystemExit(1)
164
165
def extrac_attributes_label(benchmark_result):
  """Builds a comma-separated label from the non-zero attribute counters.

  Possible attribute keys are AntiAliasing, HairlineStroke, StrokedStyle
  and FilledStyle; absent keys are treated as 0. Returns '' when none
  apply.

  (The name keeps its historical 'extrac' typo for caller compatibility.)
  """
  attributes = ['AntiAliasing', 'HairlineStroke', 'StrokedStyle', 'FilledStyle']
  # dict.get with a 0 default replaces the try/except KeyError dance, and
  # str.join avoids building a trailing separator that must be sliced off.
  return ', '.join(attr for attr in attributes if benchmark_result.get(attr, 0) != 0)
183
184
def process_benchmark_data(benchmark_json, output_pdf, output_csv):
  """Groups Google Benchmark results by benchmark name, then writes plots
  to *output_pdf* and the raw series to *output_csv*.

  benchmark_json is the 'benchmarks' list from Google Benchmark's JSON
  output; aggregate rows (mean/median/stddev) are skipped.
  """
  benchmark_results_data = {}

  for benchmark_result in benchmark_json:
    # Skip aggregate results
    if 'aggregate_name' in benchmark_result:
      continue

    benchmark_variant = benchmark_result['name'].split('/')
    # The final split is always `real_time` and can be discarded
    benchmark_variant.remove('real_time')

    splits = len(benchmark_variant)
    # First split is always the benchmark function name
    benchmark_name = benchmark_variant[0]
    # The last split is always the seeded value into the benchmark
    benchmark_seeded_value = benchmark_variant[splits - 1]
    # The second last split is always the backend
    benchmark_backend = benchmark_variant[splits - 2]
    # Time taken (wall clock time) for benchmark to run
    benchmark_real_time = benchmark_result['real_time']
    benchmark_unit = benchmark_result['time_unit']

    benchmark_family_index = benchmark_result['family_index']

    # Any middle splits are variant descriptors; fold them into the label.
    benchmark_family_label = ''
    if splits > 3:
      for i in range(1, splits - 2):
        benchmark_family_label += benchmark_variant[i] + ', '

    benchmark_family_attributes = extrac_attributes_label(benchmark_result)

    if benchmark_family_attributes == '':
      # Drop the trailing ', ' separator.
      benchmark_family_label = benchmark_family_label[:-2]
    else:
      benchmark_family_label = benchmark_family_label + benchmark_family_attributes

    # -1 marks "no draw call counter reported".
    benchmark_draw_call_count = benchmark_result.get('DrawCallCount', -1)

    optional_keys = ['DrawCallCount_Varies', 'VerbCount', 'PointCount', 'VertexCount', 'GlyphCount']

    if benchmark_name not in benchmark_results_data:
      benchmark_results_data[benchmark_name] = BenchmarkResult(
          benchmark_name, benchmark_backend, benchmark_unit, benchmark_draw_call_count
      )

    result = benchmark_results_data[benchmark_name]
    for key in optional_keys:
      if key in benchmark_result:
        result.add_optional_value(key, benchmark_seeded_value, benchmark_result[key])

    result.add_data_point(benchmark_family_index, benchmark_seeded_value, benchmark_real_time)
    result.set_family_label(benchmark_family_index, benchmark_family_label)

  # Context managers guarantee both outputs are flushed and closed even if
  # plotting raises (the CSV handle was previously leaked, and the PDF was
  # only closed on the success path). newline='' is the csv-module
  # recommendation to avoid doubled row terminators on Windows.
  with pdfp(output_pdf) as pdf:
    with open(output_csv, 'w', newline='') as csv_file:
      csv_writer = csv.writer(csv_file)
      for benchmark in benchmark_results_data.values():
        for fig in benchmark.plot():
          pdf.savefig(fig)
        benchmark.write_csv(csv_writer)
258
259
def parse_json(filename):
  """Loads Google Benchmark JSON output and returns its 'benchmarks' list.

  Exits via error() if the file cannot be read or does not parse as JSON.
  """
  try:
    # `with` guarantees the handle is closed (it was previously leaked),
    # and OSError replaces the bare except that swallowed every error.
    with open(filename, 'r') as json_file:
      json_data = json.load(json_file)
  except OSError:
    error('Unable to load file.')
  except json.JSONDecodeError:
    # The original caught a bare, undefined `JSONDecodeError` name, which
    # would itself raise NameError; the json module defines the exception.
    error('Invalid JSON. Unable to parse.')

  return json_data['benchmarks']
272
273
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status
  # (sys.exit(x) is exactly `raise SystemExit(x)`).
  raise SystemExit(main())
def __init__(self, name, backend, time_unit, draw_call_count)
static void append(char **dst, size_t *count, const char *src, size_t n)
Definition: editor.cpp:211
def process_benchmark_data(benchmark_json, output_pdf, output_csv)
Definition: main.py:1
def print(*args, **kwargs)
Definition: run_tests.py:49
static void plot(SkCanvas *canvas, const char *fn, float xMin, float xMax, float yMin, float yMax, const char *label=nullptr, bool requireES3=false)