BazelBenchmarkTestRunner.cpp
/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "bench/Benchmark.h"
#include "src/base/SkTime.h"
#include "src/utils/SkOSPath.h"
#include "tools/ProcStats.h"
#include "tools/Stats.h"
#include "tools/timer/Timer.h"

#include <cinttypes>
#include <sstream>

static DEFINE_string(skip, "", "Space-separated list of test cases (regexps) to skip.");

static DEFINE_string(
        match,
        "",
        "Space-separated list of test cases (regexps) to run. Will run all tests if omitted.");

// TODO(lovisolo): Should we check that this is a valid Git hash?
static DEFINE_string(
        gitHash,
        "",
        "Git hash to include in the results.json output file, which can be ingested by Perf.");

static DEFINE_string(issue,
                     "",
                     "Changelist ID (e.g. a Gerrit changelist number) to include in the "
                     "results.json output file, which can be ingested by Perf.");

static DEFINE_string(patchset,
                     "",
                     "Patchset ID (e.g. a Gerrit patchset number) to include in the results.json "
                     "output file, which can be ingested by Perf.");

static DEFINE_string(key,
                     "",
                     "Space-separated key/value pairs common to all benchmarks. These will be "
                     "included in the results.json output file, which can be ingested by Perf.");

static DEFINE_string(
        links,
        "",
        "Space-separated name/URL pairs with additional information about the benchmark execution, "
        "for example links to the Swarming bot and task pages, named \"swarming_bot\" and "
        "\"swarming_task\", respectively. These links are included in the "
        "results.json output file, which can be ingested by Perf.");

// When running under Bazel and overriding the output directory, you might encounter errors
// such as "No such file or directory" and "Read-only file system". The former can happen
// when running on RBE because the passed in output dir might not exist on the remote
// worker, whereas the latter can happen when running locally in sandboxed mode, which is
// the default strategy when running outside of RBE. One possible workaround is to run the
// test as a local subprocess, which can be done by passing flag --strategy=TestRunner=local
// to Bazel.
//
// Reference: https://bazel.build/docs/user-manual#execution-strategy.
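//
// For example, a local run that overrides the output directory might look roughly like this
// (the target label and output path are illustrative, not real labels from this repository):
//
//   bazel test //path/to:test --strategy=TestRunner=local \
//       --test_arg=--outputDir --test_arg=/tmp/bench_out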
static DEFINE_string(outputDir,
                     "",
                     "Directory where to write any output JSON and PNG files. "
                     "Optional when running under Bazel "
                     "(e.g. \"bazel test //path/to:test\") as it defaults to "
                     "$TEST_UNDECLARED_OUTPUTS_DIR.");

static DEFINE_string(surfaceConfig,
                     "",
                     "Name of the Surface configuration to use (e.g. \"8888\"). This determines "
                     "how we construct the SkSurface from which we get the SkCanvas that "
                     "benchmarks will draw on. See file "
                     "//tools/testrunners/common/surface_manager/SurfaceManager.h for details.");

static DEFINE_string(
        cpuName,
        "",
        "Contents of the \"cpu_or_gpu_value\" dimension for CPU-bound traces (e.g. \"AVX512\").");

static DEFINE_string(
        gpuName,
        "",
        "Contents of the \"cpu_or_gpu_value\" dimension for GPU-bound traces (e.g. \"RTX3060\").");

static DEFINE_bool(
        writePNGs,
        false,
        "Whether or not to write to the output directory any bitmaps produced by benchmarks.");

// Mutually exclusive with --autoTuneLoops.
static DEFINE_int(loops, 0, "The number of benchmark runs that constitutes a single sample.");

// Mutually exclusive with --loops.
static DEFINE_bool(autoTuneLoops,
                   false,
                   "Auto-tune (automatically determine) the number of benchmark runs that "
                   "constitutes a single sample. Timings are only reported when auto-tuning.");

static DEFINE_int(
        autoTuneLoopsMax,
        1000000,
        "Maximum number of benchmark runs per single sample when auto-tuning. Ignored unless flag "
        "--autoTuneLoops is set.");

// Mutually exclusive with --ms.
static DEFINE_int(samples, 10, "Number of samples to collect for each benchmark.");

// Mutually exclusive with --samples.
static DEFINE_int(ms, 0, "For each benchmark, collect samples for this many milliseconds.");

static DEFINE_int(flushEvery,
                  10,
                  "Flush the results.json output file every n-th run. This file "
                  "can be ingested by Perf.");

static DEFINE_bool(csv, false, "Print status in CSV format.");

static DEFINE_bool2(quiet, q, false, "if true, do not print status updates.");

static DEFINE_bool2(verbose, v, false, "Enable verbose output from the test runner.");

// Set in //bazel/devicesrc but only consumed by adb_test_runner.go. We cannot use the
// DEFINE_string macro because the flag name includes dashes.
[[maybe_unused]] static bool unused =
        SkFlagInfo::CreateStringFlag("device-specific-bazel-config",
                                     nullptr,
                                     new CommandLineFlags::StringArray(),
                                     nullptr,
                                     "Ignored by this test runner.",
                                     nullptr);

static void validate_flags(bool isBazelTest) {
    TestRunner::FlagValidators::AllOrNone(
            {{"--issue", FLAGS_issue.size() > 0}, {"--patchset", FLAGS_patchset.size() > 0}});
    TestRunner::FlagValidators::StringAtMostOne("--issue", FLAGS_issue);
    TestRunner::FlagValidators::StringAtMostOne("--patchset", FLAGS_patchset);
    TestRunner::FlagValidators::StringEven("--key", FLAGS_key);
    TestRunner::FlagValidators::StringEven("--links", FLAGS_links);

    if (!isBazelTest) {
        TestRunner::FlagValidators::StringNonEmpty("--outputDir", FLAGS_outputDir);
    }
    TestRunner::FlagValidators::StringAtMostOne("--outputDir", FLAGS_outputDir);

    TestRunner::FlagValidators::StringNonEmpty("--surfaceConfig", FLAGS_surfaceConfig);
    TestRunner::FlagValidators::StringAtMostOne("--surfaceConfig", FLAGS_surfaceConfig);

    TestRunner::FlagValidators::StringAtMostOne("--cpuName", FLAGS_cpuName);
    TestRunner::FlagValidators::StringAtMostOne("--gpuName", FLAGS_gpuName);

    TestRunner::FlagValidators::ExactlyOne(
            {{"--loops", FLAGS_loops != 0}, {"--autoTuneLoops", FLAGS_autoTuneLoops}});
    if (!FLAGS_autoTuneLoops) {
        TestRunner::FlagValidators::IntGreaterOrEqual("--loops", FLAGS_loops, 1);
    }

    TestRunner::FlagValidators::IntGreaterOrEqual("--autoTuneLoopsMax", FLAGS_autoTuneLoopsMax, 1);

    TestRunner::FlagValidators::ExactlyOne(
            {{"--samples", FLAGS_samples != 0}, {"--ms", FLAGS_ms != 0}});
    if (FLAGS_ms == 0) {
        TestRunner::FlagValidators::IntGreaterOrEqual("--samples", FLAGS_samples, 1);
    }
    if (FLAGS_samples == 0) {
        TestRunner::FlagValidators::IntGreaterOrEqual("--ms", FLAGS_ms, 1);
    }
}
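
// As a rough illustration (flag values below are made up), a combination such as
//
//   --surfaceConfig 8888 --autoTuneLoops --outputDir /tmp/bench_out
//
// passes the above validation: exactly one of --loops/--autoTuneLoops is set, --surfaceConfig
// and --outputDir are non-empty, and --samples keeps its default of 10. Passing both --loops
// and --autoTuneLoops, or an odd number of --key strings, makes the corresponding validator
// fail.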

// Helper class to produce JSON files in Perf ingestion format. The format is determined by Perf's
// format.Format Go struct:
//
// https://skia.googlesource.com/buildbot/+/e12f70e0a3249af6dd7754d55958ee64a22e0957/perf/go/ingest/format/format.go#168
//
// Note that the JSON format produced by this class differs from the one produced by Nanobench.
// The latter follows Perf's legacy format, which is determined by the format.BenchData Go struct:
//
// https://skia.googlesource.com/buildbot/+/e12f70e0a3249af6dd7754d55958ee64a22e0957/perf/go/ingest/format/leagacyformat.go#26
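//
// For illustration, the emitted results.json has roughly this shape (all field values below
// are made up):
//
//   {
//     "version": 1,
//     "git_hash": "abc123",
//     "key": { "build_system": "bazel", "os": "linux" },
//     "links": { "swarming_task": "https://example.com/task" },
//     "results": [
//       {
//         "key": { "name": "some_benchmark", "surface_config": "8888" },
//         "measurements": {
//           "ms": [
//             { "value": "min", "measurement": 0.1234 },
//             { "value": "ratio", "measurement": 1.02 }
//           ]
//         }
//       }
//     ]
//   }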
class ResultsJSONWriter {
public:
    // This struct mirrors Perf's format.SingleMeasurement Go struct:
    // https://skia.googlesource.com/buildbot/+/e12f70e0a3249af6dd7754d55958ee64a22e0957/perf/go/ingest/format/format.go#31.
    struct SingleMeasurement {
        std::string value;
        double measurement;
    };

    // This struct mirrors Perf's format.Result Go struct:
    // https://skia.googlesource.com/buildbot/+/e12f70e0a3249af6dd7754d55958ee64a22e0957/perf/go/ingest/format/format.go#69.
    //
    // Note that the format.Result Go struct supports either one single measurement, or multiple
    // measurements represented as a dictionary from arbitrary string keys to an array of
    // format.SingleMeasurement Go structs. This class focuses on the latter variant.
    struct Result {
        std::map<std::string, std::string> key;
        std::map<std::string, std::vector<SingleMeasurement>> measurements;
    };

    ResultsJSONWriter(const char* path)
            : fFileWStream(path)
            , fJson(&fFileWStream, SkJSONWriter::Mode::kPretty)
            , fAddingResults(false) {
        fJson.beginObject();  // Root object.
        fJson.appendS32("version", 1);
    }

    void addGitHash(std::string gitHash) {
        assertNotAddingResults();
        fJson.appendCString("git_hash", gitHash.c_str());
    }

    void addChangelistInfo(std::string issue, std::string patchset) {
        assertNotAddingResults();
        fJson.appendCString("issue", issue.c_str());
        fJson.appendCString("patchset", patchset.c_str());
    }

    void addKey(std::map<std::string, std::string> key) {
        assertNotAddingResults();
        fJson.beginObject("key");
        for (auto const& [name, value] : key) {
            fJson.appendCString(name.c_str(), value.c_str());
        }
        fJson.endObject();
    }

    void addLinks(std::map<std::string, std::string> links) {
        assertNotAddingResults();
        fJson.beginObject("links");
        for (auto const& [key, value] : links) {
            fJson.appendCString(key.c_str(), value.c_str());
        }
        fJson.endObject();
    }

    void addResult(Result result) {
        if (!fAddingResults) {
            fAddingResults = true;
            fJson.beginArray("results");  // "results" array.
        }

        fJson.beginObject();  // Result object.

        // Key.
        fJson.beginObject("key");  // "key" dictionary.
        for (auto const& [name, value] : result.key) {
            fJson.appendCString(name.c_str(), value.c_str());
        }
        fJson.endObject();  // "key" dictionary.

        // Measurements.
        fJson.beginObject("measurements");  // "measurements" dictionary.
        for (auto const& [name, singleMeasurements] : result.measurements) {
            fJson.beginArray(name.c_str());  // <name> array.
            for (const SingleMeasurement& singleMeasurement : singleMeasurements) {
                // Based on
                // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/ResultsWriter.h#51.
                //
                // Don't record if NaN or Inf.
                if (SkIsFinite(singleMeasurement.measurement)) {
                    fJson.beginObject();
                    fJson.appendCString("value", singleMeasurement.value.c_str());
                    fJson.appendDoubleDigits("measurement", singleMeasurement.measurement, 16);
                    fJson.endObject();
                }
            }
            fJson.endArray();  // <name> array.
        }
        fJson.endObject();  // "measurements" dictionary.

        fJson.endObject();  // Result object.
    }

    void flush() { fJson.flush(); }

    ~ResultsJSONWriter() {
        if (fAddingResults) {
            fJson.endArray();  // "results" array;
        }
        fJson.endObject();  // Root object.
    }

private:
    void assertNotAddingResults() {
        if (fAddingResults) {
            SK_ABORT("Cannot perform this operation after addResults() is called.");
        }
    }

    SkFILEWStream fFileWStream;
    SkJSONWriter fJson;
    bool fAddingResults;
};
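
// A minimal usage sketch of ResultsJSONWriter (the path and values are illustrative):
//
//   ResultsJSONWriter writer("/tmp/results.json");
//   writer.addGitHash("abc123");
//   writer.addKey({{"build_system", "bazel"}});
//   ResultsJSONWriter::Result result;
//   result.key = {{"name", "some_benchmark"}};
//   result.measurements["ms"] = {{.value = "min", .measurement = 0.1234}};
//   writer.addResult(result);
//   writer.flush();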

// Manages an autorelease pool for Metal. On other platforms, pool.drain() is a no-op.
AutoreleasePool pool;

static double now_ms() { return SkTime::GetNSecs() * 1e-6; }

// Based on
// https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1503.
static void warm_up_test_runner_once(BenchmarkTarget* target, int loops) {
    static bool warm = false;
    if (warm) {
        return;
    }
    if (FLAGS_ms < 1000) {
        // Run the first bench for 1000ms to warm up the test runner if FLAGS_ms < 1000.
        // Otherwise, the first few benches' measurements will be inaccurate.
        auto stop = now_ms() + 1000;
        do {
            target->time(loops);
            pool.drain();
        } while (now_ms() < stop);
    }
    warm = true;
}

// Collects samples for the given benchmark. Returns a boolean indicating success or failure, the
// number of benchmark runs used for each sample, the samples, and any statistics produced by the
// benchmark and/or target.
//
// Based on
// https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1489.
static bool sample_benchmark(BenchmarkTarget* target,
                             int* loops,
                             skia_private::TArray<double>* samples,
                             skia_private::TArray<SkString>* statKeys,
                             skia_private::TArray<double>* statValues) {
    target->setup();

    if (FLAGS_autoTuneLoops) {
        auto [autoTunedLoops, ok] = target->autoTuneLoops();
        if (!ok) {
            // Can't be timed. A warning has already been printed.
            target->tearDown();
            return false;
        }
        *loops = autoTunedLoops;
        if (*loops > FLAGS_autoTuneLoopsMax) {
            TestRunner::Log(
                    "Warning: Clamping loops from %d to %d (per the --autoTuneLoopsMax flag) for "
                    "benchmark \"%s\".",
                    *loops,
                    FLAGS_autoTuneLoopsMax,
                    target->getBenchmark()->getUniqueName());
            *loops = FLAGS_autoTuneLoopsMax;
        }
    } else {
        *loops = FLAGS_loops;
    }

    // Warm up the test runner to increase the chances of getting consistent measurements. Only
    // done once for the entire lifecycle of the test runner.
    warm_up_test_runner_once(target, *loops);

    // Each individual BenchmarkTarget must also be warmed up.
    target->warmUp(*loops);

    if (FLAGS_ms) {
        // Collect as many samples as possible for a specific duration of time.
        auto stop = now_ms() + FLAGS_ms;
        do {
            samples->push_back(target->time(*loops) / *loops);
            pool.drain();
        } while (now_ms() < stop);
    } else {
        // Collect an exact number of samples.
        samples->reset(FLAGS_samples);
        for (int s = 0; s < FLAGS_samples; s++) {
            (*samples)[s] = target->time(*loops) / *loops;
            pool.drain();
        }
    }

    // Scale each sample to the benchmark's own units, time/unit.
    for (double& sample : *samples) {
        sample *= (1.0 / target->getBenchmark()->getUnits());
    }

    target->dumpStats(statKeys, statValues);
    target->tearDown();

    return true;
}

// Based on
// https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#432.
static void maybe_write_png(BenchmarkTarget* target, std::string outputDir) {
    if (target->getBackend() == Benchmark::Backend::kNonRendering) {
        return;
    }

    SkString filename = SkStringPrintf("%s.png", target->getBenchmark()->getUniqueName());
    filename = SkOSPath::Join(outputDir.c_str(), filename.c_str());

    if (!target->getCanvas() ||
        target->getCanvas()->imageInfo().colorType() == kUnknown_SkColorType) {
        return;
    }

    SkBitmap bmp;
    bmp.allocPixels(target->getCanvas()->imageInfo());
    if (!target->getCanvas()->readPixels(bmp, 0, 0)) {
        TestRunner::Log("Warning: Could not read canvas pixels for benchmark \"%s\".",
                        target->getBenchmark()->getUniqueName());
        return;
    }

    SkFILEWStream stream(filename.c_str());
    if (!stream.isValid()) {
        TestRunner::Log("Warning: Could not write file \"%s\".", filename.c_str());
        return;
    }
    if (!SkPngEncoder::Encode(&stream, bmp.pixmap(), {})) {
        TestRunner::Log("Warning: Could not encode pixels from benchmark \"%s\" as PNG.",
                        target->getBenchmark()->getUniqueName());
        return;
    }

    if (FLAGS_verbose) {
        TestRunner::Log("PNG for benchmark \"%s\" written to: %s",
                        target->getBenchmark()->getUniqueName(),
                        filename.c_str());
    }
}

// Non-static because it is used from RasterBenchmarkTarget.cpp.
SkString humanize(double ms) {
    if (FLAGS_verbose) return SkStringPrintf("%" PRIu64, (uint64_t)(ms * 1e6));
    return HumanizeMs(ms);
}

#define HUMANIZE(ms) humanize(ms).c_str()

static SkString to_string(int n) {
    SkString str;
    str.appendS32(n);
    return str;
}

// Based on
// https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1593.
static void print_benchmark_stats(Stats* stats,
                                  skia_private::TArray<double>* samples,
                                  BenchmarkTarget* target,
                                  std::string surfaceConfig,
                                  int loops) {
    if (!FLAGS_autoTuneLoops) {
        TestRunner::Log("%4d/%-4dMB\t%s\t%s",
                        sk_tools::getCurrResidentSetSizeMB(),
                        sk_tools::getMaxResidentSetSizeMB(),
                        target->getBenchmark()->getUniqueName(),
                        surfaceConfig.c_str());
    } else if (FLAGS_quiet) {
        const char* mark = " ";
        const double stddev_percent = sk_ieee_double_divide(100 * sqrt(stats->var), stats->mean);
        if (stddev_percent > 5) mark = "?";
        if (stddev_percent > 10) mark = "!";
        TestRunner::Log("%10.2f %s\t%s\t%s",
                        stats->median * 1e3,
                        mark,
                        target->getBenchmark()->getUniqueName(),
                        surfaceConfig.c_str());
    } else if (FLAGS_csv) {
        const double stddev_percent = sk_ieee_double_divide(100 * sqrt(stats->var), stats->mean);
        SkDebugf("%g,%g,%g,%g,%g,%s,%s\n",
                 stats->min,
                 stats->median,
                 stats->mean,
                 stats->max,
                 stddev_percent,
                 surfaceConfig.c_str(),
                 target->getBenchmark()->getUniqueName());
    } else {
        const double stddev_percent = sk_ieee_double_divide(100 * sqrt(stats->var), stats->mean);
        TestRunner::Log("%4d/%-4dMB\t%d\t%s\t%s\t%s\t%s\t%.0f%%\t%s\t%s\t%s",
                        sk_tools::getCurrResidentSetSizeMB(),
                        sk_tools::getMaxResidentSetSizeMB(),
                        loops,
                        HUMANIZE(stats->min),
                        HUMANIZE(stats->median),
                        HUMANIZE(stats->mean),
                        HUMANIZE(stats->max),
                        stddev_percent,
                        FLAGS_ms ? to_string(samples->size()).c_str() : stats->plot.c_str(),
                        surfaceConfig.c_str(),
                        target->getBenchmark()->getUniqueName());
    }

    target->printStats();

    if (FLAGS_verbose) {
        std::ostringstream oss;
        oss << "Samples: ";
        for (int j = 0; j < samples->size(); j++) {
            oss << HUMANIZE((*samples)[j]) << " ";
        }
        oss << target->getBenchmark()->getUniqueName();
        TestRunner::Log("%s", oss.str().c_str());
    }
}
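
// For illustration (values below are made up), when --autoTuneLoops is set and neither --quiet
// nor --csv is given, the branch above logs one tab-separated line per benchmark, roughly:
//
//   38/41  MB   128   1.21ms   1.23ms   1.24ms   1.31ms   2%   ▁▃▅▂▁   8888   some_benchmark
//
// i.e. resident set size in MB, loop count, min/median/mean/max sample times, the standard
// deviation as a percentage of the mean, the sample plot (or the sample count when --ms is
// set), the surface config, and the benchmark's unique name.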

// Based on
// https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1337.
int main(int argc, char** argv) {
    TestRunner::InitAndLogCmdlineArgs(argc, argv);

    // When running under Bazel (e.g. "bazel test //path/to:test"), we'll store output files in
    // $TEST_UNDECLARED_OUTPUTS_DIR unless overridden via the --outputDir flag.
    //
    // See https://bazel.build/reference/test-encyclopedia#initial-conditions.
    std::string testUndeclaredOutputsDir;
    if (char* envVar = std::getenv("TEST_UNDECLARED_OUTPUTS_DIR")) {
        testUndeclaredOutputsDir = envVar;
    }
    bool isBazelTest = !testUndeclaredOutputsDir.empty();

    // Parse and validate flags.
    CommandLineFlags::Parse(argc, argv);
    validate_flags(isBazelTest);

    // TODO(lovisolo): Define an enum for surface configs and turn this flag into an enum value.
    std::string surfaceConfig = FLAGS_surfaceConfig[0];

    // Directory where we will write the output JSON file and any PNGs produced by the benchmarks.
    std::string outputDir =
            FLAGS_outputDir.isEmpty() ? testUndeclaredOutputsDir : FLAGS_outputDir[0];

    std::string cpuName = FLAGS_cpuName.isEmpty() ? "" : FLAGS_cpuName[0];
    std::string gpuName = FLAGS_gpuName.isEmpty() ? "" : FLAGS_gpuName[0];

    // Output JSON file.
    //
    // TODO(lovisolo): Define a constant with the file name, use it here and in flag descriptions.
    SkString jsonPath = SkOSPath::Join(outputDir.c_str(), "results.json");
    ResultsJSONWriter jsonWriter(jsonPath.c_str());

    if (FLAGS_gitHash.size() == 1) {
        jsonWriter.addGitHash(FLAGS_gitHash[0]);
    } else {
        TestRunner::Log(
                "Warning: No --gitHash flag was specified. Perf ingestion ignores JSON files that "
                "do not specify a Git hash. This is fine for local debugging, but CI tasks should "
                "always set the --gitHash flag.");
    }
    if (FLAGS_issue.size() == 1 && FLAGS_patchset.size() == 1) {
        jsonWriter.addChangelistInfo(FLAGS_issue[0], FLAGS_patchset[0]);
    }

    // Key.
    std::map<std::string, std::string> keyValuePairs = {
            // Add a key/value pair that nanobench will never use in order to avoid accidentally
            // polluting an existing trace.
            {"build_system", "bazel"},
    };
    for (int i = 1; i < FLAGS_key.size(); i += 2) {
        keyValuePairs[FLAGS_key[i - 1]] = FLAGS_key[i];
    }
    keyValuePairs.merge(GetCompilationModeGoldAndPerfKeyValuePairs());
    jsonWriter.addKey(keyValuePairs);

    // Links.
    if (FLAGS_links.size()) {
        std::map<std::string, std::string> links;
        for (int i = 1; i < FLAGS_links.size(); i += 2) {
            links[FLAGS_links[i - 1]] = FLAGS_links[i];
        }
        jsonWriter.addLinks(links);
    }
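
    // For illustration (values are made up): invoking the runner with
    //
    //   --key os linux model Pixel7 --links swarming_task https://example.com/task
    //
    // adds {"os": "linux", "model": "Pixel7"} to the key (next to the hard-coded
    // "build_system": "bazel" entry above) and a single "swarming_task" link to the output JSON.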

    int runs = 0;
    bool missingCpuOrGpuWarningLogged = false;
    for (auto benchmarkFactory : BenchRegistry::Range()) {
        std::unique_ptr<Benchmark> benchmark(benchmarkFactory(nullptr));

        if (!TestRunner::ShouldRunTestCase(benchmark->getUniqueName(), FLAGS_match, FLAGS_skip)) {
            TestRunner::Log("Skipping %s", benchmark->getName());
            continue;
        }

        benchmark->delayedSetup();

        std::unique_ptr<BenchmarkTarget> target =
                BenchmarkTarget::FromConfig(surfaceConfig, benchmark.get());
        SkASSERT_RELEASE(target != nullptr);

        if (benchmark->isSuitableFor(target->getBackend())) {
            // Print warning about missing cpu_or_gpu key if necessary.
            if (target->isCpuOrGpuBound() == SurfaceManager::CpuOrGpu::kCPU && cpuName == "" &&
                !missingCpuOrGpuWarningLogged) {
                TestRunner::Log(
                        "Warning: The surface is CPU-bound, but flag --cpuName was not provided. "
                        "Perf traces will omit keys \"cpu_or_gpu\" and \"cpu_or_gpu_value\".");
                missingCpuOrGpuWarningLogged = true;
            }
            if (target->isCpuOrGpuBound() == SurfaceManager::CpuOrGpu::kGPU && gpuName == "" &&
                !missingCpuOrGpuWarningLogged) {
                TestRunner::Log(
                        "Warning: The surface is GPU-bound, but flag --gpuName was not provided. "
                        "Perf traces will omit keys \"cpu_or_gpu\" and \"cpu_or_gpu_value\".");
                missingCpuOrGpuWarningLogged = true;
            }

            // Run benchmark and collect samples.
            int loops;
            skia_private::TArray<double> samples;
            skia_private::TArray<SkString> statKeys;
            skia_private::TArray<double> statValues;
            if (!sample_benchmark(target.get(), &loops, &samples, &statKeys, &statValues)) {
                // Sampling failed. A warning has already been printed.
                pool.drain();
                continue;
            }

            if (FLAGS_writePNGs) {
                // Save the bitmap produced by the benchmark to disk, if applicable. Not all
                // benchmarks produce bitmaps, e.g. those that use the "nonrendering" config.
                maybe_write_png(target.get(), outputDir);
            }

            // Building stats.plot often shows up in profiles, so skip building it when we're
            // not going to print it anyway.
            const bool want_plot = !FLAGS_quiet && !FLAGS_ms;
            Stats stats(samples, want_plot);

            print_benchmark_stats(&stats, &samples, target.get(), surfaceConfig, loops);

            ResultsJSONWriter::Result result;
            result.key = {
                    // Based on
                    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1566.
                    {"name", std::string(benchmark->getName())},

                    // Replaces the "config" and "extra_config" keys set by nanobench.
                    {"surface_config", surfaceConfig},

                    // Based on
                    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1578.
                    //
                    // TODO(lovisolo): Determine these dynamically when we add support for GMBench,
                    // SKPBench, etc.
                    {"source_type", "bench"},
                    {"bench_type", "micro"},

                    // Nanobench adds a "test" key consisting of "<unique name>_<width>_<height>",
                    // presumably with the goal of making the trace ID unique, see
                    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1456.
                    //
                    // However, we can accomplish unique trace IDs by expressing "<width>" and
                    // "<height>" as their own keys.
                    //
                    // Regarding the "<unique name>" part of the "test" key:
                    //
                    //  - Nanobench sets "<unique name>" to the result of
                    //    Benchmark::getUniqueName():
                    //    https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/Benchmark.h#41.
                    //
                    //  - Benchmark::getUniqueName() returns Benchmark::getName() except for the
                    //    following two cases.
                    //
                    //    - SKPBench::getUniqueName() returns "<name>_<scale>":
                    //      https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/SKPBench.cpp#33.
                    //
                    //    - SKPAnimationBench returns "<name>_<tag>":
                    //      https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/SKPAnimationBench.cpp#18.
                    //
                    //    Therefore it is important that we add "<scale>" and "<tag>" as their own
                    //    keys when we eventually add support for these kinds of benchmarks.
                    //
                    // Based on
                    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1456.
                    {"width", SkStringPrintf("%d", benchmark->getSize().width()).c_str()},
                    {"height", SkStringPrintf("%d", benchmark->getSize().height()).c_str()},
            };
            result.key.merge(target->getKeyValuePairs(cpuName, gpuName));
            result.measurements["ms"] = {
                    // Based on
                    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1571.
                    {.value = "min", .measurement = stats.min},
                    {.value = "ratio",
                     .measurement = sk_ieee_double_divide(stats.median, stats.min)},
            };
            if (!statKeys.empty()) {
                // Based on
                // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1580.
                //
                // Only SKPBench currently returns valid key/value pairs.
                SkASSERT(statKeys.size() == statValues.size());
                result.measurements["stats"] = {};
                for (int i = 0; i < statKeys.size(); i++) {
                    result.measurements["stats"].push_back(
                            {.value = statKeys[i].c_str(), .measurement = statValues[i]});
                }
            }
            jsonWriter.addResult(result);

            runs++;
            if (runs % FLAGS_flushEvery == 0) {
                jsonWriter.flush();
            }

            pool.drain();
        } else {
            if (FLAGS_verbose) {
                TestRunner::Log("Skipping \"%s\" because backend \"%s\" was unsuitable.\n",
                                target->getBenchmark()->getUniqueName(),
                                surfaceConfig.c_str());
            }
        }
    }

    BenchmarkTarget::printGlobalStats();

    SkGraphics::PurgeAllCaches();

    // Based on
    // https://skia.googlesource.com/skia/+/a063eaeaf1e09e4d6f42e0f44a5723622a46d21c/bench/nanobench.cpp#1668.
    jsonWriter.addResult({
            .key =
                    {
                            {"name", "memory_usage"},
                    },
            .measurements =
                    {
                            {"resident_set_size_mb",
                             {{.value = "max",
                               .measurement = double(sk_tools::getMaxResidentSetSizeMB())}}},
                    },
    });

    TestRunner::Log("JSON file written to: %s", jsonPath.c_str());
    TestRunner::Log("PNGs (if any) written to: %s", outputDir.c_str());

    return 0;
}