diff --git a/.github/workflows/dart.yml b/.github/workflows/dart.yml index a0414b5..028f9f6 100644 --- a/.github/workflows/dart.yml +++ b/.github/workflows/dart.yml @@ -7,7 +7,7 @@ name: Dart on: push: - branches: [ main ] + branches: [ main, custom-emitter ] paths: - '**.dart' pull_request: @@ -34,8 +34,8 @@ jobs: run: dart pub get # Uncomment this step to verify the use of 'dart format' on each commit. - # - name: Verify formatting - # run: dart format --output=none --set-exit-if-changed . + - name: Verify formatting + run: dart format --output=none --set-exit-if-changed . # Consider passing '--fatal-infos' for slightly stricter analysis. - name: Analyze project source diff --git a/.gitignore b/.gitignore index 266828e..6f0c0a1 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,6 @@ doc/api/ # Compiled executables bin/*.exe + +# Exported benchmark scores +benchmark/*.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index ee23140..9f1a93e 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,27 @@ +## 2.0.0 +Breaking changes: +- The classes `Benchmark` and `AsyncBenchmark` are now solely responsible for +generating benchmark scores. The constructor parameters `description` and +`emitter` have been removed. Generating reports is delegated to `ScoreEmitter`. +- The functions `benchmark` and `asyncBenchmark` are not longer generic and +the only optional parameter is: `scoreEmitter`. A custom `ScoreEmitter` can be +used to generate a custom benchmark report. + + +## 1.0.0 +Breaking changes: +- The command `benchmark_runner` now has subcommands `report` and `export`. +- The functions `benchmark` and `asyncBenchmark` are now generic and + accept the optional parameters: + * `E extents ColorPrintEmitter emitter`, + * `report(instance, emitter){}`, where `instance` is an instance of + `Benchmark` or `AsyncBenchmark`, respectively. + The callback can be can be used to pass benchmark scores to the emitter. 
+ ## 0.1.9 - Changed default encoding of standard output of benchmark processes to `Utf8Codec()`. This enables the correct output of histograms in windows -terminals with utf8 support. +terminals with utf8 support. ## 0.1.8 - Updated docs. @@ -19,8 +39,9 @@ inter-quartile-range `iqr` of the score sample is zero. ## 0.1.5 - Made [BenchmarkHelper.sampleSize][sampleSize] a variable assignable with -defined function. This allows changing the benchmark runtime by customizing -the relation between score estimate and score sample size. +a user defined function. This allows changing the score sample generation +customizing the relation between the single score estimate and the +score sample size. ## 0.1.4 - Fixed bugs in runner (results were listed twice, exit code was always 0). diff --git a/README.md b/README.md index eb17ad4..2fffde6 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,7 @@ Benchmarking is used to estimate and compare the execution speed of numerical algorithms and programs. -The package [`benchmark_runner`][benchmark_runner] is based on -[`benchmark_harness`][benchmark_harness] and includes helper +The package [`benchmark_runner`][benchmark_runner] includes helper functions for writing *inline* micro-benchmarks with the option of printing a score **histogram** and reporting the score **mean** ± **standard deviation**, and score **median** ± **inter quartile range**. @@ -36,8 +35,6 @@ Write inline benchmarks using the functions: asynchronous benchmarks. ```Dart - // ignore_for_file: unused_local_variable - import 'package:benchmark_runner/benchmark_runner.dart'; /// Returns the value [t] after waiting for [duration]. @@ -53,7 +50,7 @@ Write inline benchmarks using the functions: await asyncBenchmark('5ms', () async { await later(27, Duration(milliseconds: 5)); - }, emitStats: false); + }, scoreEmitter: MeanEmitter()); }); group('Set', () async { @@ -72,13 +69,12 @@ Write inline benchmarks using the functions: ``` ### 1. 
Running a Single Benchmark File A *single* benchmark file may be run as a Dart executable: -```Console -$ dart benchmark/example_async_benchmark.dart -``` -![Console Output Single](https://raw.githubusercontent.com/simphotonics/benchmark_runner/main/images/console_output_single.png) +![Console Output Single](https://raw.githubusercontent.com/simphotonics/benchmark_runner/custom-emitter/images/single_report.png) -The console output is shown above. The following colours and coding -are used: +The console output is shown above. By default, +the functions [`benchmark`][benchmark] and +[`asyncBenchmark`][asyncBenchmark] +emit benchmark score statistics. * The first column shows the micro-benchmark runtime, followed by the group name and the benchmark name. * The labels of asynchronous groups and benchmarks are marked with an hour-glass @@ -91,30 +87,67 @@ using *blue* foreground. using *cyan* foreground. * Errors are printed using *red* foreground. -### 2. Running Several Benchmark Files +### 2. Using the Benchmark Runner To run *several* benchmark files (with the format`*_benchmark.dart`) -invoke the benchmark_runner and specify a directory. +and print a report, invoke the sub-command `report` and specify a directory. If no directory is specified, it defaults to `benchmark`: -```Console -$ dart run benchmark_runner -``` - -![Console Output](https://raw.githubusercontent.com/simphotonics/benchmark_runner/main/images/console_output.png) +![Runner Report](https://raw.githubusercontent.com/simphotonics/benchmark_runner/custom-emitter/images/runner_report.png) A typical console output is shown above. In this example, the benchmark_runner detected two benchmark files, ran the micro-benchmarks and produced a report. - * The summary shows the total number of completed benchmarks, the number of benchmarks with errors and the number of groups with errors (that do not occur within the scope of a benchmark function). 
-* To show a stack trace for each error, run the benchmark_runner using -the option ``-v`` or `--verbose`. +* To show a stack trace for each error, use the option ``-v`` or `--verbose`. * The total benchmark run time may be shorter than the sum of the micro-benchmark run times since each executable benchmark file is run in a separate process. +### 3. Exporting Benchmark Scores + +To export benchmark scores use the sub-command `export`: +``` +$ dart run benchmark_runner export --outputDir=scores --extension=csv searchDirectory +``` +In the example above, `searchDirectory` is scanned for `*_benchmark.dart` +files. For each benchmark file, a corresponding file `*_benchmark.csv` is +written to the directory `scores`. + +Note: The directory must exist and the user +must have write access. When exporting benchmark scores to a file +and the emitter output is colorized, +it is recommended to use the option `--isMonochrome`, to +avoid spurious characters due to the use of Ansi modifiers. + +The functions [`benchmark`][benchmark] and +[`asyncBenchmark`][asyncBenchmark] accept the optional parameters `scoreEmitter`. +The parameter expects an object of type `ScoreEmitter` and +can be used to customize the score reports e.g. +to make the score format more suitable for writing to a file: + +```Dart +import 'package:benchmark_runner/benchmark_runner.dart'; + +class CustomEmitter implements ScoreEmitter { + @override + void emit({required description, required Score score}) { + print('# Mean Standard Deviation'); + print('${score.stats.mean} ${score.stats.stdDev}'); + } +} + +void main(){ + benchmark( + 'construct list | use custom emitter', + () { + var list = [for (var i = 0; i < 1000; ++i) i]; + }, + scoreEmitter: CustomEmitter(), + ); +} +``` ## Tips and Tricks @@ -133,49 +166,55 @@ as reported by [`benchmark_harness`][benchmark_harness] and the score statistics. - By default, [`benchmark`][benchmark] and -[`asyncBenchmark`][asyncBenchmark] report score statistics. 
In order to generate -the report provided by [`benchmark_harness`][benchmark_harness] use the -optional argument `emitStats: false`. +[`asyncBenchmark`][asyncBenchmark] report score statistics. In order to print +the report similar to that produced by +[`benchmark_harness`][benchmark_harness], use the +optional argument `emitter: MeanEmitter()`. -- Color output can be switched off by using the option: `--isMonochrome` when -calling the benchmark runner. When executing a single benchmark file the +- Color output can be switched off by using the option: `--isMonochrome` or `-m` +when calling the benchmark runner. When executing a single benchmark file the corresponding option is `--define=isMonochrome=true`. - The default colors used to style benchmark reports are best suited for a dark terminal background. -They can, however, be altered by setting the static variables defined by +They can, however, be altered by setting the *static* variables defined by the class [`ColorProfile`][ColorProfile]. In the example below, the styling of error messages and the mean value is altered. ```Dart import 'package:ansi_modifier/ansi_modifier.dart'; import 'package:benchmark_runner/benchmark_runner.dart'; - void customColorProfile() { + void adjustColorProfile() { ColorProfile.error = Ansi.red + Ansi.bold; ColorProfile.mean = Ansi.green + Ansi.italic; } void main(List args) { // Call function to apply the new custom color profile. - customProfile(); + adjustColorProfile(); } ``` - When running **asynchronous** benchmarks, the scores are printed in order of -completion. The print the scores in sequential order (as they are listed in the +completion. To print the scores in sequential order (as they are listed in the benchmark executable) it is required to *await* the completion of the async benchmark functions and the enclosing group. 
## Score Sampling -In order to calculate benchmark score statistics a sample of scores is +In order to calculate benchmark score *statistics* a sample of scores is required. The question is how to generate the score sample while minimizing systematic errors (like overheads) and keeping the -benchmark run times within acceptable limits. +total benchmark run times within acceptable limits. + +
Click to show details. -To estimate the benchmark score the functions [`warmup`][warmup] -or [`warmupAsync`][warmupAsync] are run for 200 milliseconds. +In a first step, benchmark scores are estimated using the +functions [`warmUp`][warmUp] +or [`warmUpAsync`][warmUpAsync]. The function [`BenchmarkHelper.sampleSize`][sampleSize] +uses the score estimate to determine the sample size and the number of inner +iterations (for short run times each sample entry is averaged). ### 1. Default Sampling Method The graph below shows the sample size (orange curve) as calculated by the function @@ -183,7 +222,7 @@ The graph below shows the sample size (orange curve) as calculated by the functi The green curve shows the lower limit of the total microbenchmark duration and represents the value: `clockTicks * sampleSize * innerIterations`. -![Sample Size](https://raw.githubusercontent.com/simphotonics/benchmark_runner/main/images/sample_size.png) +![Sample Size](https://raw.githubusercontent.com/simphotonics/benchmark_runner/custom-emitter/images/sample_size.png) For short run times below 100000 clock ticks each sample score is generated using the functions [`measure`][measure] or the equivalent asynchronous method [`measureAsync`][measureAsync]. @@ -197,9 +236,10 @@ averaged over (see the cyan curve in the graph above): * ticks > 1e5 => No preliminary averaging of sample scores. ### 2. Custom Sampling Method -To amend the score sampling process the static function +To custominze the score sampling process, the static function [`BenchmarkHelper.sampleSize`][sampleSize] can be replaced with a custom function: ```Dart +/// Generates a sample containing 100 benchmark scores. BenchmarkHelper.sampleSize = (int clockTicks) { return (outer: 100, inner: 1) } @@ -219,6 +259,7 @@ The command above lauches a process and runs a [`gnuplot`][gnuplot] script. For this reason, the program [`gnuplot`][gnuplot] must be installed (with the `qt` terminal enabled). +
## Contributions @@ -260,6 +301,6 @@ Please file feature requests and bugs at the [issue tracker][tracker]. [sampleSize]: https://pub.dev/documentation/benchmark_runner/latest/benchmark_runner/BenchmarkHelper/sampleSize.html -[warmup]: https://pub.dev/documentation/benchmark_runner/latest/benchmark_runner/BenchmarkHelper/warmup.html +[warmUp]: https://pub.dev/documentation/benchmark_runner/latest/benchmark_runner/BenchmarkHelper/warmUp.html -[warmupAsync]: https://pub.dev/documentation/benchmark_runner/latest/benchmark_runner/BenchmarkHelper/warmupAsync.html +[warmUpAsync]: https://pub.dev/documentation/benchmark_runner/latest/benchmark_runner/BenchmarkHelper/warmUpAsync.html diff --git a/analysis_options.yaml b/analysis_options.yaml index ea2c9e9..449df28 100755 --- a/analysis_options.yaml +++ b/analysis_options.yaml @@ -1 +1,5 @@ -include: package:lints/recommended.yaml \ No newline at end of file +include: package:lints/recommended.yaml + +linter: + rules: + - prefer_relative_imports diff --git a/benchmark/custom_emitter_benchmark.dart b/benchmark/custom_emitter_benchmark.dart new file mode 100644 index 0000000..592dded --- /dev/null +++ b/benchmark/custom_emitter_benchmark.dart @@ -0,0 +1,16 @@ +// ignore_for_file: unused_local_variable +import 'package:benchmark_runner/benchmark_runner.dart'; + +class CustomEmitter implements ScoreEmitter { + @override + void emit({required description, required Score score}) { + print('Mean Standard Deviation'); + print('${score.stats.mean} ${score.stats.stdDev}'); + } +} + +void main(List args) { + benchmark('construct | Custom emitter', () { + var list = [for (var i = 0; i < 1000; ++i) i]; + }, scoreEmitter: CustomEmitter()); +} diff --git a/benchmark/example_async_benchmark.dart b/benchmark/example_async_benchmark.dart index 884f0f6..2ff8f09 100644 --- a/benchmark/example_async_benchmark.dart +++ b/benchmark/example_async_benchmark.dart @@ -15,7 +15,7 @@ void main(List args) async { await asyncBenchmark('5ms', () async { 
await later(27, Duration(milliseconds: 5)); - }, emitStats: false); + }, scoreEmitter: MeanEmitter()); }); group('Set', () async { diff --git a/benchmark/example_benchmark.dart b/benchmark/example_benchmark.dart index 1610106..623b377 100644 --- a/benchmark/example_benchmark.dart +++ b/benchmark/example_benchmark.dart @@ -1,6 +1,4 @@ // ignore_for_file: unused_local_variable -import 'dart:collection'; - import 'package:benchmark_runner/benchmark_runner.dart'; void main(List args) { @@ -23,10 +21,6 @@ void main(List args) { benchmark('construct', () { var list = [for (var i = 0; i < 1000; ++i) i]; - }, emitStats: false); - - benchmark('construct list view', () { - final listView = UnmodifiableListView(originalList); - }); + }, scoreEmitter: MeanEmitter()); }); } diff --git a/bin/benchmark_runner.dart b/bin/benchmark_runner.dart index ee38633..670014d 100755 --- a/bin/benchmark_runner.dart +++ b/bin/benchmark_runner.dart @@ -1,119 +1,3 @@ -import 'dart:io'; - -import 'package:ansi_modifier/ansi_modifier.dart'; import 'package:benchmark_runner/benchmark_runner.dart'; -/// The script usage. -const usage = - '------------------------------------------------------------------------\n' - 'Usage: benchmark_runner [options] [] ' - '\n\n' - ' Note: If a benchmark-directory is specified, the program will attempt \n' - ' to run all dart files ending with \'_benchmark.dart.\'\n' - ' Options:\n' - ' -h, --help Shows script usage.\n' - ' -v, --verbose Enables displaying error messages.\n' - ' --isMonochrome Disables color output.\n'; - -Future main(List args) async { - final clock = Stopwatch()..start(); - final argsCopy = List.of(args); - bool isUsingDefaultDirectory = false; - - // Reading script options. - final isVerbose = args.contains('--verbose') || args.contains('-v'); - final isMonochrome = args.contains('--isMonochrome') ? true : false; - Ansi.status = isMonochrome ? 
AnsiOutput.disabled : AnsiOutput.enabled; - - argsCopy.remove('--isMonochrome'); - argsCopy.remove('--verbose'); - argsCopy.remove('-v'); - - switch (argsCopy) { - case ['-h'] || ['--help']: - print(usage); - exit(0); - case []: - argsCopy.add('benchmark'); - isUsingDefaultDirectory = true; - default: - } - - // Resolving test files. - final benchmarkFiles = await resolveBenchmarkFiles(argsCopy[0]); - if (benchmarkFiles.isEmpty) { - print(''); - print('Could not resolve any benchmark files using ' - '${isUsingDefaultDirectory ? 'default' : ''}' - ' path: ${argsCopy[0].style(ColorProfile.highlight)}'); - print( - 'Please specify a path to a benchmark directory ' - 'containing benchmark files: \n' - '\$ ${'dart run benchmark_runner'.style(ColorProfile.emphasize)} ' - '${'benchmark_directory'.style(ColorProfile.highlight)}', - ); - print(usage); - exit(ExitCode.noBenchmarkFilesFound.index); - } else { - print('\nFinding benchmark files... '.style(ColorProfile.dim)); - for (final file in benchmarkFiles) { - print(' ${file.path}'); - } - print(''); - } - - // Starting processes. - final fResults = >[]; - for (final file in benchmarkFiles) { - fResults.add(BenchmarkProcess.runBenchmark( - 'dart', - [ - '--define=isBenchmarkProcess=true', - if (isVerbose) '--define=isVerbose=true', - if (isMonochrome) '--define=isMonochrome=true', - file.path, - ], - )); - } - - final stream = Stream.periodic( - const Duration(milliseconds: 500), - (i) => 'Progress timer: ' - '${Duration(milliseconds: i * 500).ssms.style(Ansi.green)}'); - - const cursorToStartOfLine = Ansi.cursorToColumn(1); - // intStream = intStream.take(3); - final subscription = stream.listen((event) { - stdout.write(event); - stdout.write(cursorToStartOfLine); - }); - - final results = []; - - //Printing results. 
- for (final fResult in fResults) { - fResult.then((result) { - final command = - '${result.executable} ${result.arguments.where((arg) => arg != '--define=isBenchmarkProcess=true').join(' ')}'; - print('\n\n\$ '.style(ColorProfile.dim) + command); - print(result.stdout.indentLines(2, indentMultiplierFirstLine: 2)); - if (isVerbose) { - print(result.stderr.indentLines(4, indentMultiplierFirstLine: 4)); - } - results.add(result); - }); - } - - // Composing exit message. - await Future.wait(fResults); - - subscription.cancel(); - final exitStatus = BenchmarkUtils.aggregatedExitStatus( - results: results, - duration: clock.elapsed, - isVerbose: isVerbose, - ); - - print(exitStatus.message); - exit(exitStatus.exitCode.code); -} +Future main(List args) async => BenchmarkRunner().run(args); diff --git a/gnuplot/sample_size.dart b/gnuplot/sample_size.dart index f9e5711..461f10e 100644 --- a/gnuplot/sample_size.dart +++ b/gnuplot/sample_size.dart @@ -6,6 +6,23 @@ import 'package:ansi_modifier/ansi_modifier.dart'; import 'package:benchmark_runner/benchmark_runner.dart' show BenchmarkHelper; final ticksList = [ + 1, + 2, + 4, + 6, + 8, + 10, + 15, + 25, + 40, + 70, + 100, + 180, + 260, + 380, + 490, + 600, + 800, 1001, 1011, 1070, @@ -67,6 +84,10 @@ final ticksList = [ 194471590, ]; +final xAxisInMicroSeconds = ticksList.map( + (e) => (e * 1e6) / BenchmarkHelper.frequency, +); + final gnuplotScript = ''' reset; set samples 1000; @@ -74,17 +95,17 @@ set term qt size 1000, 500 font "Sans, 14"; set grid lw 2; set logscale x; unset label; -set xlabel "Clock ticks"; -set xrange [ 1000 : 1.9e8 ] noreverse writeback; +set xlabel "Single Run time [us]"; +set xrange [ 0 : 1e6 ] noreverse writeback; set x2range [ * : * ] noreverse writeback; -set yrange [ 0 : 500 ] noreverse writeback; +set yrange [ 0 : 600 ] noreverse writeback; set y2range [ * : * ] noreverse writeback; -plot "sample_size.dat" using 1:2 with line lw 3 lt 2 lc "#0000FFFF" title "averaged over" at 0.3, 0.85, \ - 
"sample_size.dat" using 1:2 lw 3 lt 6 lc "#0000BBBB" title " " at 0.3, 0.85, \ - "sample_size.dat" using 1:3 with lines lw 3 lt 2 lc "#00FF8800" title "sample size" at 0.3, 0.77, \ - "sample_size.dat" using 1:3 lw 3 lt 6 lc "#00991100" title " " at 0.3, 0.77, \ - "sample_size.dat" using 1:4 with lines lw 3 lt 2 lc "#0000C77E" title "run time [ms]" at 0.3, 0.69, \ - "sample_size.dat" using 1:4 lw 3 lt 6 lc "#0000974e" title " " at 0.3, 0.69; +plot "sample_size.dat" using 1:2 with line lw 3 lt 2 lc "#0000FFFF" title "averaged over" at 0.6, 0.85, \ + "sample_size.dat" using 1:2 lw 3 lt 6 lc "#0000BBBB" title " " at 0.6, 0.85, \ + "sample_size.dat" using 1:3 with lines lw 3 lt 2 lc "#00FF8800" title "sample size" at 0.6, 0.77, \ + "sample_size.dat" using 1:3 lw 3 lt 6 lc "#00991100" title " " at 0.6, 0.77, \ + "sample_size.dat" using 1:4 with lines lw 3 lt 2 lc "#0000C77E" title "total sample generation time [ms]" at 0.6, 0.69, \ + "sample_size.dat" using 1:4 lw 3 lt 6 lc "#0000974e" title " " at 0.6, 0.69; # '''; @@ -101,17 +122,22 @@ void main(List args) async { return (inner: 10, outer: 10); // This is a stub! 
} + print(xAxisInMicroSeconds); + // Uncomment the line below to use your custom function: // BenchmarkHelper.sampleSize = customSampleSize; final b = StringBuffer(); b.writeln( - '# Ticks Inner-Iterations Outer-Iterations Run-Time [1 ms]'); + '# Microseconds Inner-Iterations Outer-Iterations Run-Time [1 ms]', + ); for (final ticks in ticksList) { final (inner: inner, outer: outer) = BenchmarkHelper.sampleSize(ticks); - b.writeln('$ticks $inner ' - '$outer ${ticks * inner * outer / 1000000}'); + b.writeln( + '${ticks * 1e6 / BenchmarkHelper.frequency} $inner ' + '$outer ${ticks * inner * outer * 1000 / BenchmarkHelper.frequency}', + ); } final file = await File('sample_size.dat').writeAsString(b.toString()); @@ -122,6 +148,8 @@ void main(List args) async { print(result.stdout); print(result.stderr); - print('Returning with gnuplot exit code:' - ' ${result.exitCode.toString().style(Ansi.green)}'); + print( + 'Returning with gnuplot exit code:' + ' ${result.exitCode.toString().style(Ansi.green)}', + ); } diff --git a/images/console_output.png b/images/console_output.png deleted file mode 100644 index 6f3006f..0000000 Binary files a/images/console_output.png and /dev/null differ diff --git a/images/console_output_single.png b/images/console_output_single.png deleted file mode 100644 index 34e8bf4..0000000 Binary files a/images/console_output_single.png and /dev/null differ diff --git a/images/runner_report.png b/images/runner_report.png new file mode 100644 index 0000000..13dcec5 Binary files /dev/null and b/images/runner_report.png differ diff --git a/images/sample_size.png b/images/sample_size.png index 19f8d20..c0e8844 100644 Binary files a/images/sample_size.png and b/images/sample_size.png differ diff --git a/images/sample_size.svg b/images/sample_size.svg index ecbec72..f3ccd45 100644 --- a/images/sample_size.svg +++ b/images/sample_size.svg @@ -9,2487 +9,2745 @@ - + - - + + - - - - - - 0 - - + - - + + - - - - - - 100 - - + - - + + - - - - - - 200 - - + - - + + 
- - - - - - 300 - - + - - + + - - - - - - 400 - - + - - + + - - - - - - 500 - - + - - + + - - - - - - 1000 + > 600 - - - - - - - - - - - - - - - - - - - - - + - - + + - - - - - - 10000 + > 0.1 - - - - - - - - - - - - - - - - - +font-family="Ubuntu Medium" font-size="15" font-weight="400" font-style="normal" +> + + + + + + + + + + + + + + + + - + - - + + - - - - - - 100000 + > 1 - - - - - - - - - - - - - - - - - +font-family="Ubuntu Medium" font-size="15" font-weight="400" font-style="normal" +> + + + + + + + + + + + + + + + + - + - - + + - - - - - - 1x10 + > 10 - - - + + + + + + + + + + + + + + + + + + + + - + + - - - -6 - - - - + 100 - - - - - - - - - - - - - - - - - +font-family="Ubuntu Medium" font-size="15" font-weight="400" font-style="normal" +> + + + + + + + + + + + + + + + + - + - - + + - - - - - - 1x10 + > 1000 - - - + + + + + + + + + + + + + + + + + + + + - + + - - - -7 - - - - + 10000 - - - - - - - - - - - - - - - - - +font-family="Ubuntu Medium" font-size="15" font-weight="400" font-style="normal" +> + + + + + + + + + + + + + + + + - + - - + + - - - - - - 1x10 + > 100000 - - - + + + + + + + + + + + + + + + + + + + + - + + - - - -8 - - - - + 1x10 - - - - - - - - +6 - - - -Clock ticks - - - + + + + - - - - + + + + + + + + + averaged over - - + - + - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + - - - - - - sample size - - + - + - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - run time [ms] + >total sample generation 
time [ms] - - + - + - - - - - - - - - - + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + +Single Run time [us] + + + diff --git a/images/single_report.png b/images/single_report.png new file mode 100644 index 0000000..e04cdce Binary files /dev/null and b/images/single_report.png differ diff --git a/lib/benchmark_runner.dart b/lib/benchmark_runner.dart index 9a235d3..66ba76c 100644 --- a/lib/benchmark_runner.dart +++ b/lib/benchmark_runner.dart @@ -1,13 +1,17 @@ export 'src/base/async_benchmark.dart'; export 'src/base/benchmark.dart'; -export 'src/extensions/color_profile.dart'; -export 'src/extensions/duration_formatter.dart'; -export 'src/extensions/histogram.dart'; -export 'src/extensions/precision.dart'; -export 'src/extensions/string_utils.dart' hide benchmarkError, addErrorMark; -export 'src/base/group.dart'; export 'src/base/benchmark_process_result.dart'; -export 'src/enums/exit_code.dart'; -export 'src/utils/file_utils.dart'; -export 'src/utils/stats.dart'; -export 'src/extensions/benchmark_helper.dart'; +export 'src/emitter/score_emitter.dart'; +export 'src/base/group.dart'; +export 'src/base/score.dart'; +export 'src/command/benchmark_runner.dart'; +export 'src/command/export_command.dart'; +export 'src/enum/exit_code.dart'; +export 'src/extension/benchmark_helper.dart'; +export 'src/extension/color_profile.dart'; +export 'src/extension/duration_formatter.dart'; +export 'src/extension/histogram.dart'; +export 'src/extension/precision.dart'; +export 'src/extension/string_utils.dart' hide benchmarkError, addErrorMark; +export 'src/util/file_utils.dart'; +export 'src/util/stats.dart'; diff --git a/lib/src/base/async_benchmark.dart b/lib/src/base/async_benchmark.dart index 24a9f7f..4597641 100644 --- 
a/lib/src/base/async_benchmark.dart +++ b/lib/src/base/async_benchmark.dart @@ -2,235 +2,68 @@ import 'dart:async'; import 'dart:isolate'; import 'package:ansi_modifier/ansi_modifier.dart'; -import 'package:benchmark_harness/benchmark_harness.dart' - show AsyncBenchmarkBase; -import '../extensions/benchmark_helper.dart'; -import '../extensions/color_profile.dart'; -import '../extensions/duration_formatter.dart'; -import '../extensions/string_utils.dart'; -import '../utils/stats.dart'; -import 'color_print_emitter.dart'; +import '../emitter/score_emitter.dart'; +import '../extension/color_profile.dart'; +import '../extension/string_utils.dart'; +import 'async_score_generator.dart'; import 'group.dart'; -import 'score.dart'; -typedef AsyncFunction = Future Function(); - -/// An asynchronous function that does nothing. -Future futureDoNothing() async {} - -/// A class used to benchmark asynchronous functions. -/// The benchmarked function is provided as a constructor argument. -class AsyncBenchmark extends AsyncBenchmarkBase { - /// Constructs an [AsyncBenchmark] object using the following arguments: - /// * [description]: a [String] describing the benchmark, - /// * [run]: the asynchronous function to be benchmarked, - /// * [setup]: an asynchronous function that is executed - /// once before running the benchmark, - /// * [teardown]: an asynchronous function that is executed once after - /// the benchmark has completed. - const AsyncBenchmark({ - required String description, - required AsyncFunction run, - AsyncFunction? setup, - AsyncFunction? teardown, - ColorPrintEmitter emitter = const ColorPrintEmitter(), - }) : _run = run, - _setup = setup ?? futureDoNothing, - _teardown = teardown ?? futureDoNothing, - super(description, emitter: emitter); - - // static void main() { - // const GenericBenchmark().report(); - // } - - final AsyncFunction _run; - final AsyncFunction _setup; - final AsyncFunction _teardown; - - // The benchmark code. 
- @override - Future run() => _run(); - - // Not measured setup code executed prior to the benchmark runs. - @override - Future setup() => _setup(); - - // Not measures teardown code executed after the benchmark runs. - @override - Future teardown() => _teardown(); - - // To opt into the reporting the time per run() instead of per 10 run() calls. - @override - Future exercise() => run(); - - /// Returns the benchmark description (corresponds to the getter name). - String get description => name; - - /// Runs [measure] and emits the score and benchmark runtime. - @override - Future report() async { - final watch = Stopwatch()..start(); - final score = await measure(); - final runtime = watch.elapsed.msus.style(ColorProfile.dim); - emitter.emit('$runtime $description', score); - } - - /// Returns a sample of benchmark scores. - Future<({List scores, int innerIter})> sample() async { - await _setup(); - int warmupRuns = 3; - final sample = []; - final innerIters = []; - final overhead = []; - final watch = Stopwatch(); - var innerIterMean = 1; - - try { - // Warmup (Default: For 200 ms with 3 pre-runs). - final scoreEstimate = await watch.warmupAsync(_run); - final sampleSize = BenchmarkHelper.sampleSize( - scoreEstimate.ticks, - ); - - if (sampleSize.inner > 1) { - final durationAsTicks = sampleSize.inner * scoreEstimate.ticks; - for (var i = 0; i < sampleSize.outer + warmupRuns; i++) { - // Averaging each score over at least 25 runs. - // For details see function BenchmarkHelper.sampleSize. - final score = await watch.measureAsync( - _run, - durationAsTicks, - ); - sample.add(score.ticks); - innerIters.add(score.iter); - } - innerIterMean = innerIters.reduce((sum, element) => sum + element) ~/ - innerIters.length; - } else { - for (var i = 0; i < sampleSize.outer + warmupRuns; i++) { - watch.reset(); - await _run(); - // These scores are not averaged. 
- sample.add(watch.elapsedTicks); - watch.reset(); - overhead.add(watch.elapsedTicks); - } - for (var i = 0; i < sampleSize.outer; i++) { - // Removing overhead of calling elapsedTicks and adding list element. - // overhead scores are of the order of 0.1 us. - sample[i] = sample[i] - overhead[i]; - } - } - - // Rescale to microseconds. - // Note: frequency is expressed in Hz (ticks/second). - return ( - scores: sample - .map( - (e) => e * (1000000 / watch.frequency), - ) - .skip(warmupRuns) - .toList(), - innerIter: innerIterMean - ); - } finally { - await _teardown(); - } - } - - /// Returns a record holding the total benchmark duration - /// and a [Stats] object created from the score samples. - Future score() async { - final watch = Stopwatch()..start(); - final sample = await this.sample(); - watch.stop(); - //stats.removeOutliers(10); - return Score( - runtime: watch.elapsed, - sample: sample.scores, - innerIter: sample.innerIter, - ); - } - - /// Emits score statistics. - Future reportStats() async { - (emitter as ColorPrintEmitter).emitStats( - description: description, - score: await score(), - ); - } -} - -/// Defines an asynchronous benchmark. -/// * `run`: the benchmarked function, -/// * `setup`: exectued once before the benchmark, -/// * `teardown`: executed once after the benchmark runs. -/// * `emitStats`: Set to `false` to emit score as provided by benchmark_harness. -/// * `runInIsolate`: Set to `true` to run benchmarks in an isolate. +/// Runs an asynchronous benchmark. +/// * [run]: the benchmarked function, +/// * [setup]: executed once before the benchmark, +/// * [teardown]: executed once after the benchmark runs. +/// * [runInIsolate]: Set to `true` to run benchmark in a +/// separate isolate. +/// * [scoreEmitter]: A custom score emitter. +/// * [report]: A callback that calls the custom emitter. Future asyncBenchmark( String description, Future Function() run, { - Future Function()? setup, - Future Function()? 
teardown, - bool emitStats = true, + Future Function() setup = futureDoNothing, + Future Function() teardown = futureDoNothing, + ScoreEmitter scoreEmitter = const StatsEmitter(), bool runInIsolate = true, }) async { final group = Zone.current[#group] as Group?; final groupDescription = group == null ? '' : '${group.description.addSeparator(':')} '; - final instance = AsyncBenchmark( - description: groupDescription + - (hourGlass + description).style( - ColorProfile.asyncBenchmark, - ), + final scoreGenerator = AsyncScoreGenerator( run: run, setup: setup, teardown: teardown, ); + + description = + groupDescription + + (hourGlass + description).style(ColorProfile.asyncBenchmark); + final watch = Stopwatch()..start(); await runZonedGuarded( () async { try { - switch ((emitStats, runInIsolate)) { - case (true, true): - - /// Run method sample() in an isolate. - final score = await Isolate.run(instance.score); - (instance.emitter as ColorPrintEmitter).emitStats( - description: instance.description, - score: score, - ); - addSuccessMark(); - break; - case (true, false): - await instance.reportStats(); - addSuccessMark(); - break; - case (false, true): - - /// Run method measure() in an isolate. 
- final watch = Stopwatch()..start(); - final score = await Isolate.run(instance.measure); - final runtime = watch.elapsed.ssms.style(ColorProfile.dim); - instance.emitter.emit( - '$runtime ${instance.description}', - score, - ); - addSuccessMark(); - break; - case (false, false): - await instance.report(); - addSuccessMark(); + if (runInIsolate) { + await Isolate.run( + () async => scoreEmitter.emit( + description: description, + score: await scoreGenerator.score(), + ), + ); + } else { + scoreEmitter.emit( + description: description, + score: await scoreGenerator.score(), + ); } + addSuccessMark(); } catch (error, stack) { reportError( error, stack, - description: instance.description, - runtime: watch.elapsed, + description: description, + duration: watch.elapsed, errorMark: benchmarkError, ); } @@ -240,8 +73,8 @@ Future asyncBenchmark( reportError( error, stack, - description: instance.description, - runtime: watch.elapsed, + description: description, + duration: watch.elapsed, errorMark: benchmarkError, ); }), diff --git a/lib/src/base/async_score_generator.dart b/lib/src/base/async_score_generator.dart new file mode 100644 index 0000000..3f51a06 --- /dev/null +++ b/lib/src/base/async_score_generator.dart @@ -0,0 +1,134 @@ +import 'dart:async'; + +import '../extension/benchmark_helper.dart'; +import '../util/stats.dart'; +import 'score.dart'; + +typedef AsyncFunction = Future Function(); + +/// An asynchronous function that does nothing. +Future futureDoNothing() async {} + +/// A class used to benchmark asynchronous functions. +/// The benchmarked function is provided as a constructor argument. 
+class AsyncScoreGenerator {
+  /// Constructs an [AsyncScoreGenerator] object using the following arguments:
+
+  /// * [run]: the asynchronous function to be benchmarked,
+  /// * [setup]: an asynchronous function that is executed
+  /// once before running the benchmark,
+  /// * [teardown]: an asynchronous function that is executed once after
+  /// the benchmark has completed.
+  const AsyncScoreGenerator({
+    required AsyncFunction run,
+    AsyncFunction setup = futureDoNothing,
+    AsyncFunction teardown = futureDoNothing,
+  }) : _run = run,
+       _setup = setup,
+       _teardown = teardown;
+
+  final AsyncFunction _run;
+  final AsyncFunction _setup;
+  final AsyncFunction _teardown;
+
+  // The benchmark code.
+  Future run() => _run();
+
+  // Not measured setup code executed prior to the benchmark runs.
+  Future setup() => _setup();
+
+  // Not measured teardown code executed after the benchmark runs.
+  Future teardown() => _teardown();
+
+  // To opt into reporting the time per run() instead of per 10 run() calls.
+  Future exercise() => run();
+
+  /// Returns a sample of benchmark scores.
+  /// The benchmark scores represent the run time in microseconds. The integer
+  /// `innerIter` is larger than 1 if each score entry was averaged over
+  /// `innerIter` runs.
+  ///
+  Future<({List scores, int innerIter})> sample({
+    final int warmUpRuns = 3,
+    final Duration warmUpDuration = const Duration(milliseconds: 200),
+  }) async {
+    await _setup();
+    final sample = [];
+    final innerIters = [];
+    final overhead = [];
+    final watch = Stopwatch();
+    int innerIterMean = 1;
+
+    try {
+      // Warmup (Default: For 200 ms with 3 pre-runs).
+ final scoreEstimate = await watch.warmUpAsync( + _run, + duration: warmUpDuration, + warmUpRuns: warmUpRuns, + ); + final sampleSize = BenchmarkHelper.sampleSize(scoreEstimate.ticks); + + if (sampleSize.inner > 1) { + final durationAsTicks = sampleSize.inner * scoreEstimate.ticks; + for (var i = 0; i < sampleSize.outer + warmUpRuns; i++) { + // Averaging each score over approx. sampleSize.inner runs. + // For details see function BenchmarkHelper.sampleSize. + final score = await watch.measureAsync(_run, durationAsTicks); + sample.add(score.ticks); + innerIters.add(score.iter); + } + innerIterMean = + innerIters.reduce((sum, element) => sum + element) ~/ + innerIters.length; + } else { + for (var i = 0; i < sampleSize.outer + warmUpRuns; i++) { + watch.reset(); + await _run(); + // These scores are not averaged. + sample.add(watch.elapsedTicks); + watch.reset(); + overhead.add(watch.elapsedTicks); + } + for (var i = 0; i < sampleSize.outer; i++) { + // Removing overhead of calling elapsedTicks and adding list element. + // overhead scores are of the order of 0.1 us. + sample[i] = sample[i] - overhead[i]; + } + } + + // Rescale to microseconds. + // Note: frequency is expressed in Hz (ticks/second). + return ( + scores: + sample + .map((e) => e * (1000000 / watch.frequency)) + .skip(warmUpRuns) + .toList(), + innerIter: innerIterMean, + ); + } finally { + await _teardown(); + } + } + + /// Returns an instance of [Score] holding the total benchmark duration + /// and a [Stats] object created from the score sample. + /// Note: The run time entries represent microseconds. 
+  Future score({
+    final int warmUpRuns = 3,
+    final Duration warmUpDuration = const Duration(milliseconds: 200),
+  }) async {
+    final watch = Stopwatch()..start();
+    final sample = await this.sample(
+      warmUpDuration: warmUpDuration,
+      warmUpRuns: warmUpRuns,
+    );
+    watch.stop();
+    //stats.removeOutliers(10);
+    return Score(
+      duration: watch.elapsed,
+      sample: sample.scores,
+      innerIter: sample.innerIter,
+    );
+  }
+}
diff --git a/lib/src/base/benchmark.dart b/lib/src/base/benchmark.dart
index 0a2b339..adc282e 100644
--- a/lib/src/base/benchmark.dart
+++ b/lib/src/base/benchmark.dart
@@ -1,184 +1,41 @@
 import 'dart:async';
 
 import 'package:ansi_modifier/ansi_modifier.dart';
-import 'package:benchmark_harness/benchmark_harness.dart' show BenchmarkBase;
-import '../extensions/benchmark_helper.dart';
-import '../extensions/color_profile.dart';
-import '../extensions/duration_formatter.dart';
-import '../extensions/string_utils.dart';
-import '../utils/stats.dart';
-import 'color_print_emitter.dart';
+import '../emitter/score_emitter.dart';
+import '../extension/color_profile.dart';
+import '../extension/string_utils.dart';
 import 'group.dart';
-import 'score.dart';
+import 'score_generator.dart';
 
-/// A synchronous function that does nothing.
-void doNothing() {}
-
-/// A class used to benchmark synchronous functions.
-/// The benchmarked function is provided as a constructor argument.
-class Benchmark extends BenchmarkBase {
-  /// Constructs a [Benchmark] object using the following arguments:
-  /// * [description]: a [String] describing the benchmark,
-  /// * [run]: the synchronous function to be benchmarked,
-  /// * [setup]: a function that is executed once before running the benchmark,
-  /// * [teardown]: a function that is executed once after the benchmark has
-  ///   completed.
-  const Benchmark({
-    required String description,
-    required void Function() run,
-    void Function()? setup,
-    void Function()?
teardown, - ColorPrintEmitter emitter = const ColorPrintEmitter(), - }) : _run = run, - _setup = setup ?? doNothing, - _teardown = teardown ?? doNothing, - super(description, emitter: emitter); - - final void Function() _run; - final void Function() _setup; - final void Function() _teardown; - - // The benchmark code. - @override - void run() => _run(); - - /// Not measured setup code executed prior to the benchmark runs. - @override - void setup() => _setup(); - - /// Not measures teardown code executed after the benchmark runs. - @override - void teardown() => _teardown(); - - /// To opt into the reporting the time per run() instead of per 10 run() calls. - @override - void exercise() => _run(); - - /// Returns the benchmark description (corresponds to the getter name). - String get description => name; - - ({List scores, int innerIter}) sample() { - _setup(); - final warmupRuns = 3; - final sample = []; - final innerIters = []; - final overhead = []; - final watch = Stopwatch(); - var innerIterMean = 1; - try { - // Warmup (Default: For 200 ms with 3 pre-runs). - final scoreEstimate = watch.warmup(_run); - final sampleSize = BenchmarkHelper.sampleSize( - scoreEstimate.ticks, - ); - - if (sampleSize.inner > 1) { - final durationAsTicks = sampleSize.inner * scoreEstimate.ticks; - for (var i = 0; i < sampleSize.outer + warmupRuns; i++) { - // Averaging each score over at least 25 runs. - // For details see function BenchmarkHelper.sampleSize. - final score = watch.measure( - _run, - durationAsTicks, - ); - sample.add(score.ticks); - innerIters.add(score.iter); - } - innerIterMean = innerIters.reduce((sum, element) => sum + element) ~/ - innerIters.length; - } else { - for (var i = 0; i < sampleSize.outer + warmupRuns; i++) { - watch.reset(); - _run(); - // These scores are not averaged. 
- sample.add(watch.elapsedTicks); - watch.reset(); - overhead.add(watch.elapsedTicks); - } - for (var i = 0; i < sampleSize.outer; i++) { - // Removing overhead of calling elapsedTicks and adding list element. - // overhead scores are of the order of 0.1 us. - sample[i] = sample[i] - overhead[i]; - } - } - - // Rescale to microseconds. - // Note: frequency is expressed in Hz (ticks/second). - return ( - scores: sample - .map( - (e) => e * (1000000 / watch.frequency), - ) - .skip(warmupRuns) - .toList(), - innerIter: innerIterMean - ); - } finally { - teardown(); - } - } - - /// Returns a [Score] object holding the total benchmark duration - /// and a [Stats] object created from the score samples. - Score score() { - final watch = Stopwatch()..start(); - final sample = this.sample(); - return Score( - runtime: watch.elapsed, - sample: sample.scores, - innerIter: sample.innerIter, - ); - } - - /// Runs the method [sample] and emits the benchmark score statistics. - void reportStats() { - //stats.removeOutliers(10); - (emitter as ColorPrintEmitter).emitStats( - description: description, - score: score(), - ); - } - - /// Runs the method [measure] and emits the benchmark score. - @override - void report() { - final watch = Stopwatch()..start(); - final score = measure(); - watch.stop(); - final runtime = watch.elapsed.msus.style(ColorProfile.dim); - emitter.emit('$runtime $description', score); - print(' '); - } -} - -/// Defines a benchmark for the synchronous function [run]. The benchmark -/// scores are emitted to stdout. +/// Runs a benchmark for the synchronous function [run]. +/// The benchmark scores are emitted to stdout. /// * `run`: the benchmarked function, /// * `setup`: exectued once before the benchmark, /// * `teardown`: executed once after the benchmark runs. -/// * `emitStats`: Set to `false` to emit score as provided by benchmark_harness. +/// * `report`: report to emit score as provided by benchmark_harness. 
+/// * `emitter`: An emitter for generating a custom benchmark report. +/// * `report`: A callback that can be used to call an emitter method. void benchmark( String description, void Function() run, { - void Function()? setup, - void Function()? teardown, - bool emitStats = true, + void Function() setup = doNothing, + void Function() teardown = doNothing, + ScoreEmitter scoreEmitter = const StatsEmitter(), + warmUpDuration = const Duration(), }) { final group = Zone.current[#group] as Group?; var groupDescription = group == null ? '' : '${group.description.addSeparator(':')} '; - final instance = Benchmark( - description: groupDescription + - description.style( - ColorProfile.benchmark, - ), + final scoreGenerator = ScoreGenerator( run: run, setup: setup, teardown: teardown, ); final watch = Stopwatch()..start(); + description = groupDescription + description.style(ColorProfile.benchmark); + try { if (run is Future Function()) { throw UnsupportedError('The callback "run" must not be marked async!'); @@ -187,8 +44,8 @@ void benchmark( reportError( error, stack, - description: instance.description, - runtime: watch.elapsed, + description: description, + duration: watch.elapsed, errorMark: benchmarkError, ); return; @@ -197,18 +54,17 @@ void benchmark( runZonedGuarded( () { try { - if (emitStats) { - instance.reportStats(); - } else { - instance.report(); - } + scoreEmitter.emit( + description: description, + score: scoreGenerator.score(), + ); addSuccessMark(); } catch (error, stack) { reportError( error, stack, - description: instance.description, - runtime: watch.elapsed, + description: description, + duration: watch.elapsed, errorMark: benchmarkError, ); } @@ -218,8 +74,8 @@ void benchmark( reportError( error, stack, - description: instance.description, - runtime: watch.elapsed, + description: description, + duration: watch.elapsed, errorMark: benchmarkError, ); }), diff --git a/lib/src/base/benchmark_process_result.dart 
b/lib/src/base/benchmark_process_result.dart index 21aedb4..f3ca0fc 100644 --- a/lib/src/base/benchmark_process_result.dart +++ b/lib/src/base/benchmark_process_result.dart @@ -1,22 +1,47 @@ +import 'dart:collection'; import 'dart:convert' show Encoding, Utf8Codec; -import 'dart:io' show ProcessResult, Process; +import 'dart:io' show File, Process, ProcessResult; import 'package:ansi_modifier/ansi_modifier.dart'; -import 'package:benchmark_runner/src/extensions/color_profile.dart'; -import 'package:benchmark_runner/src/extensions/duration_formatter.dart'; -import 'package:benchmark_runner/src/extensions/string_utils.dart'; +import '../enum/exit_code.dart'; +import '../extension/color_profile.dart'; +import '../extension/duration_formatter.dart'; +import '../extension/string_utils.dart'; -import '../enums/exit_code.dart'; - -/// A record holding: -/// * the name of the executable that was used to start the process, +/// A class holding: +/// * the name of the executable that was used to run the benchmark, /// * the list of arguments, /// * and the resulting [ProcessResult] object. -typedef BenchmarkProcessResult = ({ - String executable, - List arguments, - ProcessResult processResult -}); +class BenchmarkProcessResult { + BenchmarkProcessResult({ + required this.executable, + required List arguments, + required this.processResult, + required this.benchmarkFile, + }) : arguments = UnmodifiableListView(arguments); + + final String executable; + final UnmodifiableListView arguments; + final ProcessResult processResult; + final File benchmarkFile; + + /// Returns the command used to generate the benchmark scores. + /// + /// Set [isBrief] to true to strips the argument + /// `--define=isBenchmarkProcess=true`. + String command({bool isBrief = true}) { + final args = switch (isBrief) { + true => arguments + .where((arg) => arg != '--define=isBenchmarkProcess=true') + .join(' '), + false => arguments.join(' '), + }; + return executable + + (args.isEmpty + ? 
' ${benchmarkFile.path}' + : ' $args ${benchmarkFile.path}'); + } +} /// A record holding: /// * an exit message, @@ -28,9 +53,10 @@ typedef ExitStatus = ({String message, ExitCode exitCode}); extension BenchmarkProcess on Process { /// Runs a benchmark and returns an instance of /// [BenchmarkProcessResult]. - static Future runBenchmark( - String executable, - List arguments, { + static Future runBenchmark({ + required String executable, + List arguments = const [], + required File benchmarkFile, String? workingDirectory, Map? environment, bool includeParentEnvironment = true, @@ -38,27 +64,27 @@ extension BenchmarkProcess on Process { Encoding? stdoutEncoding = const Utf8Codec(), // Enables histogram output Encoding? stderrEncoding = const Utf8Codec(), // on windows. }) { - return Process.run(executable, arguments, - workingDirectory: workingDirectory, - environment: environment, - includeParentEnvironment: includeParentEnvironment, - runInShell: runInShell, - stdoutEncoding: stdoutEncoding, - stderrEncoding: stderrEncoding) - .then((processResult) { - return ( + return Process.run( + executable, + [...arguments, benchmarkFile.path], + workingDirectory: workingDirectory, + environment: environment, + includeParentEnvironment: includeParentEnvironment, + runInShell: runInShell, + stdoutEncoding: stdoutEncoding, + stderrEncoding: stderrEncoding, + ).then((processResult) { + return BenchmarkProcessResult( executable: executable, arguments: arguments, processResult: processResult, + benchmarkFile: benchmarkFile, ); }); } } extension BenchmarkUtils on BenchmarkProcessResult { - /// Returns the command and the options used to start this process. - String get command => '$executable ${arguments.join(' ')}'; - /// Returns the process exit code. int get exitCode => processResult.exitCode; @@ -81,7 +107,7 @@ extension BenchmarkUtils on BenchmarkProcessResult { } /// Standard output from the process as [String]. 
- String get stdout => (processResult.stdout as String); + String get stdout => (processResult.stdout as String).trimRight(); /// Returns the standard error output from the process. String get stderr => (processResult.stderr as String) @@ -104,9 +130,7 @@ extension BenchmarkUtils on BenchmarkProcessResult { final out = StringBuffer(); out.writeln('------- Summary -------- '.style(ColorProfile.dim)); - out.writeln('Total run time: ${duration.ssms.style( - ColorProfile.success, - )}'); + out.writeln('Total run time: ${duration.ssms.style(ColorProfile.success)}'); for (final result in results) { numberOfFailedBenchmarks += result.numberOfFailedBenchmarks; @@ -114,50 +138,58 @@ extension BenchmarkUtils on BenchmarkProcessResult { numberOfCompletedBenchmarks += result.numberOfCompletedBenchmarks; } - out.writeln('Completed benchmarks: ' - '${numberOfCompletedBenchmarks.toString().style( - ColorProfile.success, - )}.'); + out.writeln( + 'Completed benchmarks: ' + '${numberOfCompletedBenchmarks.toString().style(ColorProfile.success)}.', + ); if (numberOfFailedBenchmarks > 0) { - out.writeln('Benchmarks with errors: ' - '${numberOfFailedBenchmarks.toString().style(ColorProfile.error)}.'); + out.writeln( + 'Benchmarks with errors: ' + '${numberOfFailedBenchmarks.toString().style(ColorProfile.error)}.', + ); exitCode = ExitCode.someBenchmarksFailed; } if (numberOfFailedGroups > 0) { - out.writeln('Groups with errors: ' - '${numberOfFailedGroups.toString().style(ColorProfile.error)}.\n' - 'Some benchmarks may have been skipped!'); + out.writeln( + 'Groups with errors: ' + '${numberOfFailedGroups.toString().style(ColorProfile.error)}.\n' + 'Some benchmarks may have been skipped!', + ); exitCode = ExitCode.someGroupsFailed; } if ((numberOfFailedBenchmarks > 0 || numberOfFailedGroups > 0) && !isVerbose) { - out.writeln('Try using the option ' - '${'--verbose'.style(ColorProfile.emphasize + Ansi.yellow)} or ' - '${'-v'.style(ColorProfile.emphasize + Ansi.yellow)} ' - 'for more 
details.'); + out.writeln( + 'Try using the option ' + '${'--verbose'.style(ColorProfile.emphasize + Ansi.yellow)} or ' + '${'-v'.style(ColorProfile.emphasize + Ansi.yellow)} ' + 'for more details.', + ); } switch (exitCode) { case ExitCode.someBenchmarksFailed: - out.writeln('Exiting with code ' - '${ExitCode.someBenchmarksFailed.code}: ' - '${ExitCode.someBenchmarksFailed.description.style( - ColorProfile.error, - )}'); + out.writeln( + 'Exiting with code ' + '${ExitCode.someBenchmarksFailed.code}: ' + '${ExitCode.someBenchmarksFailed.description.style(ColorProfile.error)}', + ); break; case ExitCode.allBenchmarksExecuted: - out.writeln('${'Completed successfully.'.style(ColorProfile.success)}\n' - 'Exiting with code: 0.'); + out.writeln( + '${'Completed successfully.'.style(ColorProfile.success)}\n' + 'Exiting with code: 0.', + ); break; case ExitCode.someGroupsFailed: - out.writeln('Exiting with code ' - '${ExitCode.someGroupsFailed.code}: ' - '${ExitCode.someGroupsFailed.description.style( - ColorProfile.error, - )}'); + out.writeln( + 'Exiting with code ' + '${ExitCode.someGroupsFailed.code}: ' + '${ExitCode.someGroupsFailed.description.style(ColorProfile.error)}', + ); break; default: } diff --git a/lib/src/base/color_print_emitter.dart b/lib/src/base/color_print_emitter.dart deleted file mode 100644 index 97fd24b..0000000 --- a/lib/src/base/color_print_emitter.dart +++ /dev/null @@ -1,56 +0,0 @@ -import 'package:ansi_modifier/ansi_modifier.dart'; -import 'package:benchmark_harness/benchmark_harness.dart'; -import '../extensions/histogram.dart'; -import '../extensions/precision.dart'; -import '../extensions/duration_formatter.dart'; -import '../extensions/color_profile.dart'; -import 'score.dart'; - -const plusMinus = '\u00B1'; - -class ColorPrintEmitter extends PrintEmitter { - const ColorPrintEmitter(); - - /// Prints a colorized benchmark score. 
- @override - void emit(String testName, double value) { - print('$testName(RunTime): ${'$value us.'.style(ColorProfile.mean)}\n'); - } - - /// Prints a colorized benchmark score report. - void emitStats({ - required String description, - required Score score, - }) { - //final indentCharacters = score.runtime.msus.length; - final indent = ' '; - final part1 = '${score.runtime.msus.style(ColorProfile.dim)} $description'; - - final mean = score.stats.mean / score.timeScale.factor; - final stdDev = score.stats.stdDev / score.timeScale.factor; - final median = score.stats.median / score.timeScale.factor; - final iqr = score.stats.iqr / score.timeScale.factor; - final unit = score.timeScale.unit; - - final part2 = '${indent}mean: ${mean.toStringAsFixedDigits()} $plusMinus ' - '${stdDev.toStringAsFixedDigits()} $unit, ' - .style(ColorProfile.mean) + - 'median: ${median.toStringAsFixedDigits()} $plusMinus ' - '${iqr.toStringAsFixedDigits()} $unit' - .style(ColorProfile.median); - - final part3 = '$indent${score.stats.blockHistogram()} ' - 'sample size: ${score.stats.sortedSample.length}'; - final part4 = - score.innerIter > 1 ? 
' (averaged over ${score.innerIter} runs)' : ''; - - // final rulerLength = part1.clearStyle().length; - // final ruler = (' ' * (rulerLength~/4)).style(ColorProfile.dim); - // print(ruler); - print(part1); - print(part2); - print(part3 + part4.style(ColorProfile.dim)); - - print(''); - } -} diff --git a/lib/src/base/group.dart b/lib/src/base/group.dart index 63513be..940c7cb 100644 --- a/lib/src/base/group.dart +++ b/lib/src/base/group.dart @@ -2,8 +2,8 @@ import 'dart:async'; import 'package:ansi_modifier/ansi_modifier.dart'; -import '../extensions/color_profile.dart'; -import '../extensions/string_utils.dart'; +import '../extension/color_profile.dart'; +import '../extension/string_utils.dart'; class Group { const Group(this.description, this.body); @@ -20,11 +20,10 @@ class Group { final parentGroup = Zone.current[#group] as Group?; if (parentGroup != null) { throw UnsupportedError( - '${'Nested groups detected! '.style(ColorProfile.error)}' - 'Group ${description.style(ColorProfile.emphasize)} defined ' - 'within group ${parentGroup.description.style( - ColorProfile.emphasize, - )}'); + '${'Nested groups detected! 
'.style(ColorProfile.error)}' + 'Group ${description.style(ColorProfile.emphasize)} defined ' + 'within group ${parentGroup.description.style(ColorProfile.emphasize)}', + ); } } @@ -41,7 +40,7 @@ class Group { error, stack, description: description, - runtime: watch.elapsed, + duration: watch.elapsed, errorMark: groupErrorMark, ); } @@ -52,7 +51,7 @@ class Group { error, stack, description: description, - runtime: watch.elapsed, + duration: watch.elapsed, errorMark: groupErrorMark, ); }), @@ -64,39 +63,29 @@ class Group { void run() { _throwIfNested(); final watch = Stopwatch()..start(); - runZonedGuarded( - body, - ((error, stack) { - reportError( - error, - stack, - description: description, - runtime: watch.elapsed, - errorMark: groupErrorMark, - ); - }), - zoneValues: {#group: this}, - ); + runZonedGuarded(body, ((error, stack) { + reportError( + error, + stack, + description: description, + duration: watch.elapsed, + errorMark: groupErrorMark, + ); + }), zoneValues: {#group: this}); } } /// Defines a benchmark group. /// /// Note: Groups may not be nested. -FutureOr group( - String description, - FutureOr Function() body, -) async { +FutureOr group(String description, FutureOr Function() body) async { final isAsync = (body is Future Function()); if (isAsync) { description = hourGlass + description; } - final instance = Group( - description.style(ColorProfile.group), - body, - ); + final instance = Group(description.style(ColorProfile.group), body); if (isAsync) { return instance.runAsync(); } else { diff --git a/lib/src/base/score.dart b/lib/src/base/score.dart index f0a8bd6..a239298 100644 --- a/lib/src/base/score.dart +++ b/lib/src/base/score.dart @@ -1,15 +1,18 @@ -import '../utils/stats.dart'; +import '../util/stats.dart'; -/// Object holding sample stats and the sample generation runtime. +/// Object holding sample stats and the duration it took to generate the +/// score sample. 
class Score { - Score( - {required this.runtime, required List sample, required this.innerIter}) - : stats = Stats(sample); + Score({ + required this.duration, + required List sample, + required this.innerIter, + }) : stats = Stats(sample); - /// Micro-benchmark duration - final Duration runtime; + /// Measured micro-benchmark duration + final Duration duration; - /// Indicates if the a sample point was averaged over [iter] runs. + /// Indicates if the a sample entry was averaged over [iter] runs. final int innerIter; /// Scores and score stats (in microseconds). @@ -19,6 +22,6 @@ class Score { late final ({String unit, int factor}) timeScale = switch (stats.mean) { > 1000000 => (unit: 's', factor: 1000000), > 1000 => (unit: 'ms', factor: 1000), - _ => (unit: 'us', factor: 1) + _ => (unit: 'us', factor: 1), }; } diff --git a/lib/src/base/score_generator.dart b/lib/src/base/score_generator.dart new file mode 100644 index 0000000..f625539 --- /dev/null +++ b/lib/src/base/score_generator.dart @@ -0,0 +1,127 @@ +import '../extension/benchmark_helper.dart'; +import '../util/stats.dart'; +import 'score.dart'; + +/// A synchronous function that does nothing. +void doNothing() {} + +/// A class used to benchmark synchronous functions. +/// The benchmarked function is provided as a constructor argument. +class ScoreGenerator { + /// Constructs a [ScoreGenerator] object using the following arguments: + /// * [description]: a [String] describing the benchmark, + /// * [run]: the synchronous function to be benchmarked, + /// * [setup]: a function that is executed once before running the benchmark, + /// * [teardown]: a function that is executed once after the benchmark has + /// completed. 
+  const ScoreGenerator({
+    required void Function() run,
+    void Function() setup = doNothing,
+    void Function() teardown = doNothing,
+  }) : _run = run,
+       _setup = setup,
+       _teardown = teardown;
+
+  final void Function() _run;
+  final void Function() _setup;
+  final void Function() _teardown;
+
+  // The benchmark code.
+  void run() => _run();
+
+  /// Not measured setup code executed prior to the benchmark runs.
+  void setup() => _setup();
+
+  /// Not measured teardown code executed after the benchmark runs.
+  void teardown() => _teardown();
+
+  /// To opt into reporting the time per run() instead of per 10 run() calls.
+  void exercise() => _run();
+
+  /// Generates a sample of benchmark scores.
+  /// * The benchmark score entries represent the run time in microseconds.
+  /// * The integer `innerIter` is larger than 1
+  ///   if each score entry was averaged over
+  ///   `innerIter` runs.
+  ({List scores, int innerIter}) sample({
+    final int warmUpRuns = 3,
+    final Duration warmUpDuration = const Duration(milliseconds: 200),
+  }) {
+    _setup();
+    final sample = [];
+    final innerIters = [];
+    final overhead = [];
+    final watch = Stopwatch();
+    //
+    int innerIterMean = 1;
+    try {
+      // Warmup (Default: For 200 ms with 3 pre-runs).
+      final scoreEstimate = watch.warmUp(
+        _run,
+        duration: warmUpDuration,
+        warmUpRuns: warmUpRuns,
+      );
+      final sampleSize = BenchmarkHelper.sampleSize(scoreEstimate.ticks);
+
+      if (sampleSize.inner > 1) {
+        final durationAsTicks = sampleSize.inner * scoreEstimate.ticks;
+        for (var i = 0; i < sampleSize.outer + warmUpRuns; i++) {
+          // Averaging each score over approx. sampleSize.inner runs.
+          // For details see function BenchmarkHelper.sampleSize.
+          final score = watch.measure(_run, durationAsTicks);
+          sample.add(score.ticks);
+          innerIters.add(score.iter);
+        }
+        innerIterMean =
+            innerIters.reduce((sum, element) => sum + element) ~/
+            innerIters.length;
+      } else {
+        for (var i = 0; i < sampleSize.outer + warmUpRuns; i++) {
+          watch.reset();
+          _run();
+          // These scores are not averaged.
+          sample.add(watch.elapsedTicks);
+          watch.reset();
+          overhead.add(watch.elapsedTicks);
+        }
+        for (var i = 0; i < sampleSize.outer; i++) {
+          // Removing overhead of calling elapsedTicks and adding list element.
+          // overhead scores are of the order of 0.1 us.
+          sample[i] = sample[i] - overhead[i];
+        }
+      }
+
+      // Rescale to microseconds.
+      // Note: frequency is expressed in Hz (ticks/second).
+      return (
+        scores:
+            sample
+                .map((e) => e * (1000000 / watch.frequency))
+                .skip(warmUpRuns)
+                .toList(),
+        innerIter: innerIterMean,
+      );
+    } finally {
+      teardown();
+    }
+  }
+
+  /// Returns a [Score] object holding the total benchmark duration
+  /// and a [Stats] object created from the score sample.
+  /// Note: The run time entries represent durations in microseconds.
+  Score score({
+    final int warmUpRuns = 3,
+    final Duration warmUpDuration = const Duration(milliseconds: 200),
+  }) {
+    final watch = Stopwatch()..start();
+    final sample = this.sample(
+      warmUpDuration: warmUpDuration,
+      warmUpRuns: warmUpRuns,
+    );
+    return Score(
+      duration: watch.elapsed,
+      sample: sample.scores,
+      innerIter: sample.innerIter,
+    );
+  }
+}
diff --git a/lib/src/command/benchmark_runner.dart b/lib/src/command/benchmark_runner.dart
new file mode 100644
index 0000000..fcde665
--- /dev/null
+++ b/lib/src/command/benchmark_runner.dart
@@ -0,0 +1,41 @@
+import 'package:args/command_runner.dart';
+
+import 'export_command.dart';
+import 'report_command.dart';
+
+class BenchmarkRunner extends CommandRunner {
+  BenchmarkRunner._()
+    : super(
+        'benchmark_runner',
+        'Runs benchmarks. Prints and exports score reports.',
+      );
+
+  static BenchmarkRunner?
_instance; + + @override + String? get usageFooter => + '\nNote: Benchmark files are dart files ending with \'_benchmark.dart\'.'; + + factory BenchmarkRunner() { + return _instance ?? BenchmarkRunner._() + ..addCommand(ExportCommand()) + ..addCommand(ReportCommand()) + ..argParser.addFlag( + 'verbose', + abbr: 'v', + defaultsTo: false, + negatable: false, + help: 'Enable to show more info and error messages.', + ) + ..argParser.addFlag( + 'isMonochrome', + abbr: 'm', + negatable: false, + help: 'Disables colorized reporting.', + ); + } + + @override + final String invocation = + 'dart run benchmark_runner [arguments] '; +} diff --git a/lib/src/command/export_command.dart b/lib/src/command/export_command.dart new file mode 100644 index 0000000..2870d14 --- /dev/null +++ b/lib/src/command/export_command.dart @@ -0,0 +1,121 @@ +// ignore_for_file: prefer_interpolation_to_compose_strings + +import 'dart:async'; +import 'dart:io'; + +import 'package:ansi_modifier/ansi_modifier.dart'; +import 'package:path/path.dart' as p; + +import '../base/benchmark_process_result.dart'; +import '../extension/color_profile.dart'; +import '../extension/path_helper.dart'; +import '../extension/string_utils.dart'; +import '../util/file_utils.dart'; +import 'report_command.dart'; + +class ExportCommand extends ReportCommand { + @override + String get name => 'export'; + + // @override + // final category = 'benchmark'; + + @override + String get description => + 'Exports benchmark scores. A custom file extension ' + 'and directory may be specified.'; + + static const _extension = 'extension'; + static const _outputDir = 'outputDir'; + + ExportCommand() { + argParser + ..addOption( + _extension, + abbr: 'e', + defaultsTo: 'txt', + help: 'Set file extension of exported files.', + ) + ..addOption( + _outputDir, + abbr: 'o', + defaultsTo: null, + help: 'Directory must exist. 
Score files will be written to it.', + ); + } + + @override + Future run() async { + final clock = Stopwatch()..start(); + + // Reading flags + final isVerbose = globalResults!.flag('verbose'); + final isMonochrome = globalResults!.flag('isMonochrome'); + + Ansi.status = isMonochrome ? AnsiOutput.disabled : AnsiOutput.enabled; + + final searchDirectory = + argResults!.rest.isEmpty ? 'benchmark' : argResults!.rest.first; + + final benchmarkFiles = await findBenchmarkFiles(); + + // Reading options + final outputDirectory = argResults!.option(_outputDir) ?? searchDirectory; + + // Starting processes. + final fResults = >[]; + for (final file in benchmarkFiles) { + fResults.add( + BenchmarkProcess.runBenchmark( + executable: 'dart', + arguments: [ + '--define=isBenchmarkProcess=true', + if (isVerbose) '--define=isVerbose=true', + if (isMonochrome) '--define=isMonochrome=true', + ], + benchmarkFile: file, + ), + ); + } + + // Start subscription to progress indicator. + final progressIndicator = progressIndicatorSubscription(); + + final results = await Future.wait(fResults); + + for (final result in results) { + print('\$ '.style(ColorProfile.dim) + result.command()); + if (isVerbose) { + print(result.stdout.indentLines(2, indentMultiplierFirstLine: 2)); + print('\n'); + } + + final outputFileName = p + .fromUri(result.benchmarkFile.uri) + .basename + .setExtension('.' + argResults!.option(_extension)!); + + final outputPath = outputDirectory.join(outputFileName); + + print('Writing scores to: '.style(ColorProfile.dim) + outputPath + '\n'); + + await writeTo(path: outputPath, contents: result.stdout); + + if (isVerbose) { + print(result.stderr.indentLines(4, indentMultiplierFirstLine: 4)); + } + } + + // Close subscription to progress indicator. 
// Resolving benchmark files.
+ final ( + benchmarkFiles: benchmarkFiles, + entityType: entityType, + ) = await resolveBenchmarkFiles(searchDirectory); + if (benchmarkFiles.isEmpty) { + print(''); + print( + 'Could not resolve any benchmark files using path: ' + '${searchDirectory.style(ColorProfile.highlight)}\n', + ); + exit(ExitCode.noBenchmarkFilesFound.index); + } else { + if (entityType == FileSystemEntityType.directory) { + print( + '\nFinding benchmark files in '.style(ColorProfile.dim) + + searchDirectory + + ' ...'.style(ColorProfile.dim), + ); + } else { + print('\nFinding benchmark files ... '.style(ColorProfile.dim)); + } + for (final file in benchmarkFiles) { + print(' ${file.path}'); + } + print(''); + } + return benchmarkFiles; + } + + @override + Future run() async { + final clock = Stopwatch()..start(); + + // Reading flags + final isVerbose = globalResults!.flag('verbose'); + final isMonochrome = globalResults!.flag('isMonochrome'); + + Ansi.status = isMonochrome ? AnsiOutput.disabled : AnsiOutput.enabled; + + final benchmarkFiles = await findBenchmarkFiles(); + + // Starting processes. + final fResults = >[]; + for (final file in benchmarkFiles) { + fResults.add( + BenchmarkProcess.runBenchmark( + executable: 'dart', + arguments: [ + '--define=isBenchmarkProcess=true', + if (isVerbose) '--define=isVerbose=true', + if (isMonochrome) '--define=isMonochrome=true', + ], + benchmarkFile: file, + ), + ); + } + + // Start subscription to progress indicator. + final progressIndicator = progressIndicatorSubscription(); + + // Printing benchmark scores. + for (final fResult in fResults) { + fResult.then((result) { + print('\$ '.style(ColorProfile.dim) + result.command()); + print(result.stdout.indentLines(2, indentMultiplierFirstLine: 2)); + print('\n'); + if (isVerbose) { + print(result.stderr.indentLines(4, indentMultiplierFirstLine: 4)); + } + }); + } + + // Close subscription to progress indicator. 
+ final results = await Future.wait(fResults); + + await progressIndicator.cancel(); + + // Composing exit message. + final exitStatus = BenchmarkUtils.aggregatedExitStatus( + results: results, + duration: clock.elapsed, + isVerbose: isVerbose, + ); + + print(exitStatus.message); + exit(exitStatus.exitCode.code); + } +} diff --git a/lib/src/emitter/score_emitter.dart b/lib/src/emitter/score_emitter.dart new file mode 100644 index 0000000..3e66f0a --- /dev/null +++ b/lib/src/emitter/score_emitter.dart @@ -0,0 +1,71 @@ +// ignore_for_file: prefer_interpolation_to_compose_strings +import 'package:ansi_modifier/ansi_modifier.dart'; +import '../extension/histogram.dart'; +import '../extension/precision.dart'; +import '../extension/duration_formatter.dart'; +import '../extension/color_profile.dart'; +import '../base/score.dart'; + +const plusMinus = '\u00B1'; + +/// Implement this interface to create a custom score emitter. +abstract interface class ScoreEmitter { + void emit({required String description, required Score score}); +} + +/// Emits the mean of the benchmark scores. +/// The output generated is similar to the output generated by the +/// `PrintEmitter` of `benchmark_harness`. +class MeanEmitter implements ScoreEmitter { + const MeanEmitter(); + + /// Prints a colorized benchmark score. + @override + void emit({required String description, required Score score}) { + print( + '$description(RunTime): ' + + '${score.stats.mean.toStringAsFixedDigits()} us'.style( + ColorProfile.mean, + ) + + '\n', + ); + } +} + +/// Emits the benchmark duration, the score mean, score median, +/// and a score histogram. +class StatsEmitter implements ScoreEmitter { + const StatsEmitter(); + + /// Prints a colorized benchmark score report. 
+ @override + void emit({required String description, required Score score}) { + //final indentCharacters = score.runtime.msus.length; + final indent = ' '; + final part1 = '${score.duration.msus.style(ColorProfile.dim)} $description'; + + final mean = score.stats.mean / score.timeScale.factor; + final stdDev = score.stats.stdDev / score.timeScale.factor; + final median = score.stats.median / score.timeScale.factor; + final iqr = score.stats.iqr / score.timeScale.factor; + final unit = score.timeScale.unit; + + final part2 = + '${indent}mean: ${mean.toStringAsFixedDigits()} $plusMinus ' + '${stdDev.toStringAsFixedDigits()} $unit, ' + .style(ColorProfile.mean) + + 'median: ${median.toStringAsFixedDigits()} $plusMinus ' + '${iqr.toStringAsFixedDigits()} $unit' + .style(ColorProfile.median); + + final part3 = + '$indent${score.stats.blockHistogram()} ' + 'sample size: ${score.stats.sortedSample.length}'; + final part4 = + score.innerIter > 1 ? ' (averaged over ${score.innerIter} runs)' : ''; + print(part1); + print(part2); + print(part3 + part4.style(ColorProfile.dim)); + print(''); + } +} diff --git a/lib/src/enums/exit_code.dart b/lib/src/enum/exit_code.dart similarity index 100% rename from lib/src/enums/exit_code.dart rename to lib/src/enum/exit_code.dart diff --git a/lib/src/extensions/benchmark_helper.dart b/lib/src/extension/benchmark_helper.dart similarity index 67% rename from lib/src/extensions/benchmark_helper.dart rename to lib/src/extension/benchmark_helper.dart index 8ea6df6..778d8a3 100644 --- a/lib/src/extensions/benchmark_helper.dart +++ b/lib/src/extension/benchmark_helper.dart @@ -1,7 +1,11 @@ import 'dart:math' show exp, log; +import 'package:exception_templates/exception_templates.dart'; + typedef SampleSizeEstimator = ({int outer, int inner}) Function(int clockTicks); +class TimeError extends ErrorType {} + extension BenchmarkHelper on Stopwatch { /// Measures the runtime of [f] for [ticks] clock ticks and /// reports the average runtime 
expressed as clock ticks. @@ -19,7 +23,9 @@ extension BenchmarkHelper on Stopwatch { /// Measures the runtime of [f] for [ticks] clock ticks and /// reports the average runtime expressed as clock ticks. Future<({int ticks, int iter})> measureAsync( - Future Function() f, int ticks) async { + Future Function() f, + int ticks, + ) async { var iter = 0; reset(); start(); @@ -32,14 +38,14 @@ extension BenchmarkHelper on Stopwatch { /// Measures the runtime of [f] for [duration] and /// reports the average runtime expressed as clock ticks. - ({int ticks, int iter}) warmup( + ({int ticks, int iter}) warmUp( void Function() f, { Duration duration = const Duration(milliseconds: 200), - int preRuns = 3, + int warmUpRuns = 3, }) { - var ticks = microsecondsToTicks(duration.inMicroseconds); - var iter = 0; - for (var i = 0; i < preRuns; i++) { + int ticks = durationToTicks(duration); + int iter = 0; + for (var i = 0; i < warmUpRuns; i++) { f(); } reset(); @@ -53,15 +59,15 @@ extension BenchmarkHelper on Stopwatch { /// Measures the runtime of [f] for [duration] and /// reports the average runtime expressed as clock ticks. - Future<({int ticks, int iter})> warmupAsync( + Future<({int ticks, int iter})> warmUpAsync( Future Function() f, { Duration duration = const Duration(milliseconds: 200), - int preRuns = 3, + int warmUpRuns = 3, }) async { - var ticks = microsecondsToTicks(duration.inMicroseconds); - var iter = 0; + int ticks = durationToTicks(duration); + int iter = 0; reset(); - for (var i = 0; i < preRuns; i++) { + for (var i = 0; i < warmUpRuns; i++) { await f(); } start(); @@ -78,6 +84,10 @@ extension BenchmarkHelper on Stopwatch { /// Converts clock [ticks] to seconds. static double ticksToSeconds(int ticks) => ticks / BenchmarkHelper.frequency; + /// Convert [duration] to clock ticks. + static int durationToTicks(Duration duration) => + microsecondsToTicks(duration.inMicroseconds); + /// Converts clock [ticks] to microseconds. 
// Estimates for the averaging used within `measure` and `measureAsync`.
+ + if (clockTicks < 1) { + throw ErrorOfType( + message: 'Unsuitable duration detected.', + expectedState: 'clockTicks > 0', + invalidState: 'clockTicks: $clockTicks', + ); + } + const i1e3 = 200; const i1e4 = 100; const i1e5 = 15; @@ -165,35 +164,44 @@ extension BenchmarkHelper on Stopwatch { const t1e7 = 10000000; // 10 ms; const t1e8 = 100000000; // 100 ms; + // Rescale clock ticks for other platforms. For example, on the web + // 1 clock tick corresponds to 1 microsecond. + if (frequency < 1e9) { + clockTicks = 1e9 ~/ frequency * clockTicks; + } + return switch (clockTicks) { - <= t1e3 => ( - outer: s1e3, - inner: i1e3, - ), // 1 us + <= t1e3 => (outer: s1e3, inner: i1e3), // 1 us > t1e3 && <= t1e4 => ( - // 10 us - outer: interpolateExp(clockTicks, t1e3, s1e3, t1e4, s1e4).ceil(), - inner: interpolateExp(clockTicks, t1e3, i1e3, t1e4, i1e4).ceil() - ), + // 10 us + outer: interpolateExp(clockTicks, t1e3, s1e3, t1e4, s1e4).ceil(), + inner: interpolateExp(clockTicks, t1e3, i1e3, t1e4, i1e4).ceil(), + ), > t1e4 && <= t1e5 => ( - // 100 us - outer: interpolateExp(clockTicks, t1e4, s1e4, t1e5, s1e5).ceil(), - inner: interpolateExp(clockTicks, t1e4, i1e4, t1e5, i1e5).ceil() - ), + // 100 us + outer: interpolateExp(clockTicks, t1e4, s1e4, t1e5, s1e5).ceil(), + inner: interpolateExp(clockTicks, t1e4, i1e4, t1e5, i1e5).ceil(), + ), > t1e5 && <= t1e6 => ( - // 1ms - outer: interpolateExp(clockTicks, t1e5, s1e5 * i1e5 / 2, t1e6, s1e6) - .ceil(), - inner: i1e6, - ), + // 1ms + outer: + interpolateExp( + clockTicks, + t1e5, + s1e5 * i1e5 / 2, + t1e6, + s1e6, + ).ceil(), + inner: i1e6, + ), > t1e6 && <= t1e7 => ( - outer: interpolateExp(clockTicks, t1e6, s1e6, t1e7, s1e7).ceil(), - inner: i1e7 - ), // 10 ms + outer: interpolateExp(clockTicks, t1e6, s1e6, t1e7, s1e7).ceil(), + inner: i1e7, + ), // 10 ms > t1e7 && <= t1e8 => ( - outer: interpolateExp(clockTicks, t1e7, s1e7, t1e8, s1e8).ceil(), - inner: i1e8 - ), // 100 ms + outer: interpolateExp(clockTicks, t1e7, s1e7, t1e8, 
s1e8).ceil(), + inner: i1e8, + ), // 100 ms _ => (outer: s1e8, inner: i1e8), }; } diff --git a/lib/src/extensions/color_profile.dart b/lib/src/extension/color_profile.dart similarity index 100% rename from lib/src/extensions/color_profile.dart rename to lib/src/extension/color_profile.dart diff --git a/lib/src/extensions/duration_formatter.dart b/lib/src/extension/duration_formatter.dart similarity index 100% rename from lib/src/extensions/duration_formatter.dart rename to lib/src/extension/duration_formatter.dart diff --git a/lib/src/extensions/histogram.dart b/lib/src/extension/histogram.dart similarity index 88% rename from lib/src/extensions/histogram.dart rename to lib/src/extension/histogram.dart index 7215dca..8c18ed8 100644 --- a/lib/src/extensions/histogram.dart +++ b/lib/src/extension/histogram.dart @@ -1,6 +1,6 @@ -import '../utils/stats.dart'; -import '../extensions/root.dart'; -import '../extensions/color_profile.dart'; +import '../util/stats.dart'; +import 'root.dart'; +import 'color_profile.dart'; import 'dart:math' as math show min, max; import 'package:ansi_modifier/ansi_modifier.dart'; @@ -65,32 +65,32 @@ extension Histogram on Stats { static final blocks = switch (Ansi.status) { AnsiOutput.enabled => [ - '_'.style(ColorProfile.dim), - '_', - // '\u2581'.colorize(AnsiModifier.grey), - '\u2581', - // '\u2582'.colorize(AnsiModifier.grey), - '\u2582', - '\u2583', - '\u2584', - '\u2585', - '\u2586', - '\u2587', - '\u2588', - '\u2589', - ], + '_'.style(ColorProfile.dim), + '_', + // '\u2581'.colorize(AnsiModifier.grey), + '\u2581', + // '\u2582'.colorize(AnsiModifier.grey), + '\u2582', + '\u2583', + '\u2584', + '\u2585', + '\u2586', + '\u2587', + '\u2588', + '\u2589', + ], AnsiOutput.disabled => [ - '_', - '\u2581', - '\u2582', - '\u2583', - '\u2584', - '\u2585', - '\u2586', - '\u2587', - '\u2588', - '\u2589', - ], + '_', + '\u2581', + '\u2582', + '\u2583', + '\u2584', + '\u2585', + '\u2586', + '\u2587', + '\u2588', + '\u2589', + ], }; /// Returns a 
block histogram in the form of a [String]. @@ -114,10 +114,7 @@ extension Histogram on Stats { /// ▉▂__________________ 177 ____________________ /// /// - String blockHistogram({ - bool normalize = false, - int intervalNumber = 0, - }) { + String blockHistogram({bool normalize = false, int intervalNumber = 0}) { intervalNumber = intervalNumber < 2 ? intervalNumberFreedman : intervalNumber; @@ -142,8 +139,9 @@ extension Histogram on Stats { counts[i] = counts[i] / (sampleSize * intervalSize); } - final countsMax = - counts.reduce((value, element) => math.max(value, element)); + final countsMax = counts.reduce( + (value, element) => math.max(value, element), + ); final deltaCounts = countsMax / (blocks.length - 1); final result = List.filled(gridPoints, ' '); final blockCount = blocks.length; diff --git a/lib/src/extension/path_helper.dart b/lib/src/extension/path_helper.dart new file mode 100644 index 0000000..af7bf99 --- /dev/null +++ b/lib/src/extension/path_helper.dart @@ -0,0 +1,53 @@ +import 'package:path/path.dart' as p; + +extension PathHelper on String { + String join([ + String? part2, + String? part3, + String? part4, + String? part5, + String? part6, + String? part7, + String? part8, + String? part9, + String? part10, + String? part11, + String? part12, + String? part13, + String? part14, + String? part15, + String? part16, + ]) => p.join( + this, + part2, + part3, + part4, + part5, + part6, + part7, + part8, + part9, + part10, + part11, + part12, + part13, + part14, + part15, + part16, + ); + + /// Returns the part the of path before the last separator. + String get dirname => p.dirname(this); + + /// Returns the part of the path after the last separator. + String get basename => p.basename(this); + + /// Returns the extension of the current path. + String get extension => p.extension(this); + + /// Return the path with the trailing extension set to [extension]. 
+ String setExtension(String extension) => p.setExtension(this, extension); + + /// Removes the trailing extension from the last part of `this`. + String get withoutExtension => p.withoutExtension(this); +} diff --git a/lib/src/extensions/precision.dart b/lib/src/extension/precision.dart similarity index 91% rename from lib/src/extensions/precision.dart rename to lib/src/extension/precision.dart index 6f77375..195aa40 100644 --- a/lib/src/extensions/precision.dart +++ b/lib/src/extension/precision.dart @@ -25,7 +25,5 @@ extension Precision on double { /// print(d.toStringAsFixedDigits(4)) // Prints: 0.000007899 /// ``` String toStringAsFixedDigits([int nonZeroFractionalDigits = 2]) => - toStringAsFixed( - leadingZeros + nonZeroFractionalDigits, - ); + toStringAsFixed(leadingZeros + nonZeroFractionalDigits); } diff --git a/lib/src/extensions/root.dart b/lib/src/extension/root.dart similarity index 80% rename from lib/src/extensions/root.dart rename to lib/src/extension/root.dart index 358e056..17ee92b 100644 --- a/lib/src/extensions/root.dart +++ b/lib/src/extension/root.dart @@ -18,9 +18,10 @@ extension Root on num { double root(num n) { if (isNegative) { throw ErrorOf( - message: 'Error in extension function root($this).', - invalidState: '$this < 0', - expectedState: 'A positive function argument.'); + message: 'Error in extension function root($this).', + invalidState: '$this < 0', + expectedState: 'A positive function argument.', + ); } return pow(this, 1 / n).toDouble(); } diff --git a/lib/src/extensions/string_utils.dart b/lib/src/extension/string_utils.dart similarity index 93% rename from lib/src/extensions/string_utils.dart rename to lib/src/extension/string_utils.dart index 1b7de43..1462d0a 100644 --- a/lib/src/extensions/string_utils.dart +++ b/lib/src/extension/string_utils.dart @@ -1,9 +1,9 @@ import 'dart:io'; import 'package:ansi_modifier/ansi_modifier.dart'; -import 'package:benchmark_runner/src/extensions/duration_formatter.dart'; -import 
If [path] represents a directory, the directory is scanned for benchmark files.
+Future<({List benchmarkFiles, FileSystemEntityType entityType})> +resolveBenchmarkFiles(String path) async { + final benchmarkFiles = []; + final entityType = await FileSystemEntity.type(path); + if ((entityType == FileSystemEntityType.directory)) { + final directory = Directory(path); + await for (final entity in directory.list()) { + if (entity is File) { + if (entity.path.endsWith('_benchmark.dart')) { + benchmarkFiles.add(entity); + } + } + } + } else if ((entityType == FileSystemEntityType.file)) { + benchmarkFiles.add(File(path)); + } + return (benchmarkFiles: benchmarkFiles, entityType: entityType); +} + +/// Opens a file using [path], writes [contents], and closes the file. +/// Returns `Future` on success. Throws a [FileSystemException] +/// on failure. +Future writeTo({ + required String path, + String contents = '', + FileMode mode = FileMode.write, +}) async { + final entityType = await FileSystemEntity.type(path); + switch (entityType) { + case FileSystemEntityType.file || + FileSystemEntityType.notFound || + FileSystemEntityType.pipe: + final file = File(path); + return await file.writeAsString(contents, mode: mode); + case FileSystemEntityType.directory: + throw FileSystemException('Could not write to $path. It is a directory!'); + default: + throw FileSystemException( + 'Could not write to file with path: $path.', + path, + ); + } +} diff --git a/lib/src/utils/stats.dart b/lib/src/util/stats.dart similarity index 98% rename from lib/src/utils/stats.dart rename to lib/src/util/stats.dart index 8fc03ed..1ca4471 100644 --- a/lib/src/utils/stats.dart +++ b/lib/src/util/stats.dart @@ -113,7 +113,8 @@ class Stats { double get quartile3 => _quartile3(); late final _iqr = Lazy( - () => _quartile3(updateCache: true) - _quartile1(updateCache: true)); + () => _quartile3(updateCache: true) - _quartile1(updateCache: true), + ); /// Returns the inter quartile range. 
double get iqr => _iqr(); diff --git a/lib/src/utils/file_utils.dart b/lib/src/utils/file_utils.dart deleted file mode 100644 index 0d4669a..0000000 --- a/lib/src/utils/file_utils.dart +++ /dev/null @@ -1,22 +0,0 @@ -import 'dart:io'; - -/// Returns a list of resolved benchmark files. -/// * Benchmark files must end with `_benchmark.dart`. -/// * Returns an empty list if no benchmark files were found. -Future> resolveBenchmarkFiles(String path) async { - final benchmarkFiles = []; - final entityType = FileSystemEntity.typeSync(path); - if ((entityType == FileSystemEntityType.directory)) { - final directory = Directory(path); - await for (final entity in directory.list()) { - if (entity is File) { - if (entity.path.endsWith('_benchmark.dart')) { - benchmarkFiles.add(entity); - } - } - } - } else if ((entityType == FileSystemEntityType.file)) { - benchmarkFiles.add(File(path)); - } - return benchmarkFiles; -} diff --git a/pubspec.yaml b/pubspec.yaml index a0e8b43..a240d0a 100755 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -1,13 +1,14 @@ name: benchmark_runner -description: A library for writing inline micro-benchmarks, reporting score - statistics, and running sync/async benchmarks. +description: A library for writing inline micro-benchmarks, running sync/async + benchmarks, reporting and exporting score statistics. 
-version: 0.1.9 +version: 2.0.0 homepage: https://github.com/simphotonics/benchmark_runner executables: + # : benchmark_runner: benchmark_runner topics: @@ -18,14 +19,16 @@ topics: - statistics environment: - sdk: '^3.0.0' + sdk: '^3.7.0' dependencies: - ansi_modifier: ^0.1.2 - benchmark_harness: ^2.2.2 + ansi_modifier: ^0.1.4 + args: ^2.7.0 exception_templates: ^0.3.1 lazy_memo: ^0.2.3 + path: ^1.9.0 dev_dependencies: - lints: ^4.0.0 - test: ^1.25.7 + lints: ^5.1.1 + test: ^1.25.8 + test_process: ^2.1.1 diff --git a/test/runner_test.dart b/test/runner_test.dart new file mode 100644 index 0000000..a142479 --- /dev/null +++ b/test/runner_test.dart @@ -0,0 +1,111 @@ +import 'package:test/test.dart'; +import 'package:test_process/test_process.dart'; + +void main() { + group('Main command:', () { + test('usage', () async { + final process = await TestProcess.start('dart', [ + 'run', + 'benchmark_runner', + ]); + + final usage = await process.stdout.rest.join('\n'); + + expect( + usage, + 'Runs benchmarks. Prints and exports score reports.\n' + '\n' + 'Usage: dart run benchmark_runner [arguments] \n' + '\n' + 'Global options:\n' + '-h, --help Print this usage information.\n' + '-v, --verbose Enable to show more info and error messages.\n' + '-m, --isMonochrome Disables colorized reporting.\n' + '\n' + 'Available commands:\n' + ' export Exports benchmark scores. A custom file extension and directory may be specified.\n' + ' report Runs benchmarks and prints a score report to stdout.\n' + '\n' + 'Run "benchmark_runner help " for more information about a command.\n' + '\n' + 'Note: Benchmark files are dart files ending with \'_benchmark.dart\'.', + ); + + // Assert that the process exits with code 0. 
+ await process.shouldExit(0); + }); + }); + group('Sub-command report:', () { + test('usage', () async { + final process = await TestProcess.start('dart', [ + 'run', + 'benchmark_runner', + 'report', + '-h', + ]); + + final usage = await process.stdout.rest.join('\n'); + + expect( + usage, + equals( + 'Runs benchmarks and prints a score report to stdout.\n' + '\n' + 'Usage: benchmark_runner report [arguments] \n' + '-h, --help Print this usage information.\n' + '\n' + 'Run "benchmark_runner help" to see global options.', + ), + ); + + // Assert that the process exits with code 0. + await process.shouldExit(0); + }); + test('run benchmark file', () async { + final process = await TestProcess.start('dart', [ + 'run', + 'benchmark_runner', + 'report', + 'test/samples/sample_benchmark.dart', + ]); + await process.shouldExit(0); + }); + test('scan directory', () async { + final process = await TestProcess.start('dart', [ + 'run', + 'benchmark_runner', + 'report', + 'test/samples', + ]); + await process.shouldExit(0); + }); + }); + group('Sub-command export:', () { + test('usage', () async { + final process = await TestProcess.start('dart', [ + 'run', + 'benchmark_runner', + 'export', + '-h', + ]); + + final usage = await process.stdout.rest.join('\n'); + + expect( + usage, + 'Exports benchmark scores. A custom file extension and directory may be specified.\n' + '\n' + 'Usage: benchmark_runner export [arguments] \n' + '-h, --help Print this usage information.\n' + '-e, --extension Set file extension of exported files.\n' + ' (defaults to "txt")\n' + '-o, --outputDir Directory must exist. Score files will be written to it.\n' + '\n' + 'Run "benchmark_runner help" to see global options.', + ); + + // Assert that the process exits with code 0. 
+ await process.shouldExit(0); + }); + }); +} diff --git a/test/samples/sample_benchmark.dart b/test/samples/sample_benchmark.dart new file mode 100644 index 0000000..12f169c --- /dev/null +++ b/test/samples/sample_benchmark.dart @@ -0,0 +1,16 @@ +// ignore_for_file: unused_local_variable +import 'package:benchmark_runner/benchmark_runner.dart'; + +void main(List args) { + group('List:', () { + final originalList = [for (var i = 0; i < 1000; ++i) i]; + + benchmark('construct', () { + var list = [for (var i = 0; i < 1000; ++i) i]; + }); + + benchmark('construct', () { + var list = [for (var i = 0; i < 1000; ++i) i]; + }, scoreEmitter: MeanEmitter()); + }); +} diff --git a/test/stats_test.dart b/test/stats_test.dart index 06d14d5..c427ed0 100644 --- a/test/stats_test.dart +++ b/test/stats_test.dart @@ -46,8 +46,10 @@ void main() { normalize: true, ); var sum = hist.values.fold(0.0, (sum, current) => sum + current); - expect(sum * (stats.max - stats.min) / numberOfIntervals, - closeTo(1.0, 1e-12)); + expect( + sum * (stats.max - stats.min) / numberOfIntervals, + closeTo(1.0, 1e-12), + ); }); test('total count (non-normalized histograms)', () { final hist = stats.histogram(normalize: false); diff --git a/tool/actions.sh b/tool/actions.sh index 69277a8..a5a0d4c 100755 --- a/tool/actions.sh +++ b/tool/actions.sh @@ -54,7 +54,7 @@ echo echo -e "${BLUE}=== Running Example $PWD...${RESET}" echo -dart run benchmark_runner +dart run benchmark_runner report echo echo -e "${BLUE}=== Script finished successfully ${RESET}"