Source file bench_command.ml
(* This module makes the command line interface for bench. *)

open Core

type callback_bench =
  ?run_config:Run_config.t
  -> ?analysis_configs:Analysis_config.t list
  -> ?display_config:Display_config.t
  -> ?save_to_file:(Measurement.t -> string)
  -> ?libname:string
  -> Test.t list
  -> unit

type callback_load_analyze_and_display =
  filenames:string list
  -> ?analysis_configs:Analysis_config.t list
  -> ?display_config:Display_config.t
  -> unit
  -> unit

let wrapper_param =
  let open Command.Let_syntax in
  [%map_open
    let limit_width_to =
      flag
        "-width"
        (optional_with_default Defaults.limit_width_to int)
        ~doc:
          (sprintf
             "WIDTH width limit on column display (default %d)."
             Defaults.limit_width_to)
    and display_style =
      flag
        "-display"
        (optional_with_default Defaults.display_as_string string)
        ~doc:
          (sprintf
             "STYLE Table style (short, tall, line, blank or column). Default %s."
             Defaults.display_as_string)
    and verbosity = flag "-v" no_arg ~doc:" High verbosity level."
    and quota =
      flag
        "-quota"
        (optional_with_default Defaults.quota Quota.arg_type)
        ~doc:
          (sprintf
             "<INT>x|<SPAN> Quota allowed per test. May be a number of runs \
              (e.g. 1000x or 1e6x) or a time span (e.g. 10s or 500ms). Default %s."
             (Quota.to_string Defaults.quota))
    and fork_each_benchmark =
      flag "-fork" no_arg ~doc:" Fork and run each benchmark in separate child-process"
    and show_all_values =
      flag "-all-values" no_arg ~doc:" Show all column values, including very small ones."
    and no_compactions = flag "-no-compactions" no_arg ~doc:" Disable GC compactions."
    and show_overheads =
      flag "-overheads" no_arg ~doc:" Show measurement overheads, when applicable."
    and sampling_type =
      choose_one
        ~if_nothing_chosen:(Default_to (`Geometric Defaults.geometric_scale))
        [ flag
            "-linear"
            (optional int)
            ~doc:"INCREMENT Use linear sampling to explore number of runs, example 1."
          |> map ~f:(Option.map ~f:(fun k -> `Linear k))
        ; flag
            "-geometric"
            (optional float)
            ~doc:
              (sprintf
                 "SCALE Use geometric sampling. (default %.2f)"
                 Defaults.geometric_scale)
          |> map ~f:(Option.map ~f:(fun s -> `Geometric s))
        ]
    and save_sample_data =
      flag "-save" no_arg ~doc:" Save benchmark data to <test name>.txt files."
    and show_output_as_sexp = flag "-sexp" no_arg ~doc:" Output as sexp."
    and minimal_tables =
      flag "-ascii" no_arg ~doc:" Display data in simple ascii based tables."
    and reduced_bootstrap =
      flag "-reduced-bootstrap" no_arg ~doc:" Reduce the number of bootstrapping iterations"
    and show_absolute_ci =
      flag "-ci-absolute" no_arg ~doc:" Display 95% confidence interval in absolute numbers"
    and stabilize_gc_between_runs =
      flag "-stabilize-gc" no_arg ~doc:" Stabilize GC between each sample capture."
    and clear_columns =
      flag
        "-clear-columns"
        no_arg
        ~doc:
          " Don't display default columns. Only show \
           user specified ones."
    and analyze_files =
      flag
        "-load"
        (listed Filename.arg_type)
        ~doc:
          "FILE Analyze previously saved data files and don't run tests. \
           [-load] can be specified multiple times."
    and regressions =
      flag
        "-regression"
        (listed string)
        ~doc:"REGR Specify additional regressions (See -? help). "
    and thin_overhead =
      flag
        "-thin-overhead"
        (optional float)
        ~doc:
          "INT If given, just run the test function(s) N times; skip \
           measurements and regressions. Float lexemes like \"1e6\" are allowed."
      |> map ~f:(Option.map ~f:Float.to_int)
    and anon_columns = anon (sequence ("COLUMN" %: Bench_command_column.arg)) in
    fun ~main () ->
      let sanitize_name str =
        String.map str ~f:(fun c ->
          if Char.is_alphanum c || String.mem "-_." c then c else '_')
      in
      let display = Defaults.string_to_display display_style in
      let display, ascii_table =
        if minimal_tables then Ascii_table.Display.column_titles, true else display, false
      in
      let verbosity : Verbosity.t =
        if show_output_as_sexp then Quiet else if verbosity then High else Low
      in
      let columns = if clear_columns then [] else Defaults.command_columns in
      let columns = columns @ anon_columns in
      let analysis_configs, columns =
        let f =
          let open Bench_command_column in
          function
          | Analysis analysis -> `Fst analysis
          | Display_column col -> `Snd col
        in
        List.partition_map columns ~f
      in
      let analysis_configs = List.concat analysis_configs in
      let analysis_configs =
        let to_name i = sprintf " [%d]" (i + 1) in
        analysis_configs
        @ (List.mapi regressions ~f:(fun i reg ->
             let regression_name = to_name i in
             printf "Regression%s = %s\n%!" regression_name reg;
             Analysis_config.parse reg ~regression_name))
      in
      let analysis_configs =
        if reduced_bootstrap
        then
          List.map
            analysis_configs
            ~f:
              (Analysis_config.reduce_bootstrap
                 ~bootstrap_trials:Analysis_config.default_reduced_bootstrap_trials)
        else analysis_configs
      in
      let save =
        if save_sample_data
        then begin
          printf "Measurements will be saved.\n%!";
          let time_str =
            Time.format (Time.now ()) "%F-%R" ~zone:(force Time.Zone.local)
          in
          Some
            (fun meas ->
               let name = Measurement.name meas in
               let fn =
                 sprintf
                   "%s-%s-%s.txt"
                   (sanitize_name name)
                   time_str
                   (Quota.to_string quota)
               in
               printf "Saving to: %s.\n%!" fn;
               fn)
        end
        else None
      in
      let run_config =
        Run_config.create
          ~verbosity
          ~quota
          ~sampling_type
          ~stabilize_gc_between_runs
          ~no_compactions
          ~fork_each_benchmark
          ?thin_overhead
          ()
      in
      let display_config =
        Display_config.create
          ~limit_width_to
          ~show_samples:(List.mem columns `Samples ~equal:Display_column.equal)
          ~show_percentage:(List.mem columns `Percentage ~equal:Display_column.equal)
          ~show_speedup:(List.mem columns `Speedup ~equal:Display_column.equal)
          ~show_all_values
          ~show_absolute_ci
          ~show_overheads
          ~display
          ~ascii_table
          ~show_output_as_sexp
          ()
      in
      let configs =
        match analyze_files with
        | [] -> (analysis_configs, display_config, `Run (save, run_config))
        | filenames -> (analysis_configs, display_config, `From_file filenames)
      in
      main configs]

let readme () = sprintf "\
Columns that can be specified are:
\t%s

Columns with no significant values will not be displayed. The
following columns will be displayed by default:
\t%s

Error Estimates
===============
To display error estimates, prefix the column name (or
regression) with a '+'. Example +time.

(1) R^2 is the fraction of the variance of the responder (such as
runtime) that is accounted for by the predictors (such as number of
runs). More informally, it describes how good a fit we're getting,
with R^2 = 1 indicating a perfect fit and R^2 = 0 indicating a
horrible fit. Also see:
http://en.wikipedia.org/wiki/Coefficient_of_determination

(2) Bootstrapping is used to compute 95%% confidence intervals
for each estimate.

Because we expect runtime to be very highly correlated with number of
runs, values very close to 1 are typical; an R^2 value for 'time' that
is less than 0.99 should cause some suspicion, and a value less than
0.9 probably indicates either a shortage of data or that the data is
erroneous or peculiar in some way.

Specifying additional regressions
=================================
The built-in columns encode common analyses that apply to most
functions. Bench allows the user to specify custom analyses to help
understand relationships specific to a particular function using the
flag \"-regression\". It is worth noting that this feature requires
some understanding of both linear regression and how various quantities
relate to each other in the OCaml runtime. To specify a regression
one must specify the responder variable and a comma-separated list
of predictor variables.

For example: +Time:Run,mjGC,Comp

which asks bench to estimate execution time using three predictors,
namely the number of runs, major GCs and compaction stats, and to
display error estimates. Drop the prefix '+' to suppress error
estimation. The variables available for regression include:
\t%s
"
    Bench_command_column.column_description_table
    (String.concat ~sep:" " Defaults.columns_as_string)
    (Variable.summarize ())

let make_ext ~summary main_param =
  let open Command.Let_syntax in
  Command.basic
    ~readme
    ~summary
    [%map_open
      let wrapper = wrapper_param
      and main = main_param in
      wrapper ~main]

let make
      ~(bench : callback_bench)
      ~(analyze : callback_load_analyze_and_display)
      ~(tests : Test.t list)
  =
  make_ext
    ~summary:
      (sprintf
         "Benchmark for %s"
         (String.concat
            ~sep:", "
            (List.map tests ~f:(fun test ->
               let len = List.length (Test.tests test) in
               if len = 1
               then Test.name test
               else sprintf "%s (%d tests)" (Test.name test) len))))
    (Command.Param.return (fun args ->
       match args with
       | (analysis_configs, display_config, `Run (save_to_file, run_config)) ->
         bench ~analysis_configs ~display_config ~run_config ?save_to_file tests
       | (analysis_configs, display_config, `From_file filenames) ->
         analyze ~analysis_configs ~display_config ~filenames ()))