Flutter Engine
displaylist_benchmark_parser Namespace Reference

Classes

class  BenchmarkResult
 

Functions

def main ()
 
def error (message)
 
def extrac_attributes_label (benchmark_result)
 
def process_benchmark_data (benchmark_json, output_pdf, output_csv)
 
def parse_json (filename)
 

Function Documentation

◆ error()

def displaylist_benchmark_parser.error(message)

Definition at line 161 of file displaylist_benchmark_parser.py.

161 def error(message):
162   print(message)
163   sys.exit(1)

◆ extrac_attributes_label()

def displaylist_benchmark_parser.extrac_attributes_label(benchmark_result)

Definition at line 166 of file displaylist_benchmark_parser.py.

166 def extrac_attributes_label(benchmark_result):
167   # Possible attribute keys are:
168   # AntiAliasing
169   # HairlineStroke
170   # StrokedStyle
171   # FilledStyle
172   attributes = ['AntiAliasing', 'HairlineStroke', 'StrokedStyle', 'FilledStyle']
173   label = ''
174
175   for attr in attributes:
176     try:
177       if benchmark_result[attr] != 0:
178         label += attr + ', '
179     except KeyError:
180       pass
181
182   return label[:-2]
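For illustration, a minimal usage sketch; the input dictionary below is a made-up benchmark entry, and only the attribute keys listed in the function matter. Non-zero counters are kept, zero or missing ones are skipped:

sample_result = {'AntiAliasing': 1, 'HairlineStroke': 0, 'FilledStyle': 1}
extrac_attributes_label(sample_result)  # returns 'AntiAliasing, FilledStyle'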

◆ main()

def displaylist_benchmark_parser.main()

Definition at line 133 of file displaylist_benchmark_parser.py.

133 def main():
134   parser = argparse.ArgumentParser()
135
136   parser.add_argument(
137       'filename', action='store', help='Path to the JSON output from Google Benchmark'
138   )
139   parser.add_argument(
140       '-o',
141       '--output-pdf',
142       dest='output_pdf',
143       action='store',
144       default='output.pdf',
145       help='Filename to output the PDF of graphs to.'
146   )
147   parser.add_argument(
148       '-c',
149       '--output-csv',
150       dest='output_csv',
151       action='store',
152       default='output.csv',
153       help='Filename to output the CSV data to.'
154   )
155
156   args = parser.parse_args()
157   json_data = parse_json(args.filename)
158   return process_benchmark_data(json_data, args.output_pdf, args.output_csv)
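As a usage sketch, main() wires the command line to parse_json() and process_benchmark_data(). The file names below are placeholders, and invoking the script directly with python3 is an assumption about the workflow:

python3 displaylist_benchmark_parser.py benchmark_output.json --output-pdf graphs.pdf --output-csv results.csv

When -o / -c are omitted, the defaults output.pdf and output.csv are used.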

◆ parse_json()

def displaylist_benchmark_parser.parse_json(filename)

Definition at line 260 of file displaylist_benchmark_parser.py.

260 def parse_json(filename):
261   try:
262     json_file = open(filename, 'r')
263   except:  # pylint: disable=bare-except
264     error('Unable to load file.')
265
266   try:
267     json_data = json.load(json_file)
268   except JSONDecodeError:  # pylint: disable=undefined-variable
269     error('Invalid JSON. Unable to parse.')
270
271   return json_data['benchmarks']
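For context, only the 'benchmarks' array of the input is returned. A minimal sketch of the Google Benchmark JSON shape this expects; field values are illustrative and the benchmark name is hypothetical:

example_json = {
    'context': {},  # run metadata emitted by Google Benchmark; not used here
    'benchmarks': [{
        'name': 'BM_DrawRect/Software/16/real_time',  # hypothetical benchmark name
        'family_index': 0,
        'real_time': 123.4,
        'time_unit': 'ns',
    }],
}
# parse_json() would return example_json['benchmarks'].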

◆ process_benchmark_data()

def displaylist_benchmark_parser.process_benchmark_data(benchmark_json, output_pdf, output_csv)

Definition at line 185 of file displaylist_benchmark_parser.py.

185 def process_benchmark_data(benchmark_json, output_pdf, output_csv):
186   benchmark_results_data = {}
187
188   for benchmark_result in benchmark_json:
189     # Skip aggregate results
190     if 'aggregate_name' in benchmark_result:
191       continue
192
193     benchmark_variant = benchmark_result['name'].split('/')
194     # The final split is always `real_time` and can be discarded
195     benchmark_variant.remove('real_time')
196
197     splits = len(benchmark_variant)
198     # First split is always the benchmark function name
199     benchmark_name = benchmark_variant[0]
200     # The last split is always the seeded value into the benchmark
201     benchmark_seeded_value = benchmark_variant[splits - 1]
202     # The second last split is always the backend
203     benchmark_backend = benchmark_variant[splits - 2]
204     # Time taken (wall clock time) for benchmark to run
205     benchmark_real_time = benchmark_result['real_time']
206     benchmark_unit = benchmark_result['time_unit']
207
208     benchmark_family_index = benchmark_result['family_index']
209
210     benchmark_family_label = ''
211     if splits > 3:
212       for i in range(1, splits - 2):
213         benchmark_family_label += benchmark_variant[i] + ', '
214
215     benchmark_family_attributes = extrac_attributes_label(benchmark_result)
216
217     if benchmark_family_attributes == '':
218       benchmark_family_label = benchmark_family_label[:-2]
219     else:
220       benchmark_family_label = benchmark_family_label + benchmark_family_attributes
221
222     if 'DrawCallCount' in benchmark_result:
223       benchmark_draw_call_count = benchmark_result['DrawCallCount']
224     else:
225       benchmark_draw_call_count = -1
226
227     optional_keys = ['DrawCallCount_Varies', 'VerbCount', 'PointCount', 'VertexCount', 'GlyphCount']
228
229     if benchmark_name not in benchmark_results_data:
230       benchmark_results_data[benchmark_name] = BenchmarkResult(
231           benchmark_name, benchmark_backend, benchmark_unit, benchmark_draw_call_count
232       )
233
234     for key in optional_keys:
235       if key in benchmark_result:
236         benchmark_results_data[benchmark_name].add_optional_value(
237             key, benchmark_seeded_value, benchmark_result[key]
238         )
239
240     benchmark_results_data[benchmark_name].add_data_point(
241         benchmark_family_index, benchmark_seeded_value, benchmark_real_time
242     )
243     benchmark_results_data[benchmark_name].set_family_label(
244         benchmark_family_index, benchmark_family_label
245     )
246
247   pdf = pdfp(output_pdf)
248
249   csv_file = open(output_csv, 'w')
250   csv_writer = csv.writer(csv_file)
251
252   for benchmark in benchmark_results_data:
253     figures = benchmark_results_data[benchmark].plot()
254     for fig in figures:
255       pdf.savefig(fig)
256     benchmark_results_data[benchmark].write_csv(csv_writer)
257   pdf.close()
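To make the name-splitting above concrete, here is a sketch using a hypothetical benchmark name of the shape <function>/<variant...>/<backend>/<seed>/real_time:

name = 'BM_DrawLine/Hairline/Metal/2048/real_time'   # hypothetical name
variant = name.split('/')
variant.remove('real_time')     # ['BM_DrawLine', 'Hairline', 'Metal', '2048']

benchmark_name = variant[0]     # 'BM_DrawLine' -- keys the BenchmarkResult entry
seeded_value = variant[-1]      # '2048' -- recorded with each data point
backend = variant[-2]           # 'Metal' -- passed to the BenchmarkResult constructor
# The middle components ('Hairline' here), plus any non-zero attribute counters from
# extrac_attributes_label(), form the family label stored for family_index.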