Source file interfaces.ml
(* File: interfaces.ml
OCaml-GPR - Gaussian Processes for OCaml
Copyright (C) 2009- Markus Mottl
email: markus.mottl@gmail.com
WWW: http://www.ocaml.info
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*)

open Core
open Lacaml.D
open Utils

(** {6 Representations of (sparse) derivative matrices} *)

(** Representation of indices into sparse matrices *)
module Sparse_indices = Int_vec

(** Derivative representations for both symmetric and unsymmetric matrices.

- Dense: matrix is dense.
- Sparse_rows: matrix is zero everywhere except for rows whose
index is stored in the sparse index argument. The rows in the
matrix correspond to the given indices.
- Const: matrix is constant everywhere.
- Factor: matrix is the non-derived matrix times the given factor
(useful with exponential functions).
*)
type common_mat_deriv = [
  | `Dense of mat
  | `Sparse_rows of mat * Sparse_indices.t
  | `Const of float
  | `Factor of float
]

(** Only general matrices support sparse column representations.
- Sparse_cols: matrix is zero everywhere except for columns whose
index is stored in the sparse index argument. The columns in
the matrix correspond to the given indices.
*)
type mat_deriv = [
  | common_mat_deriv
  | `Sparse_cols of mat * Sparse_indices.t
]

(** Only symmetric (square) matrices support diagonal vectors and
diagonal constants as derivatives.
- Diag_vec: matrix is zero everywhere except for the diagonal
whose values are given in the argument.
- Diag_const: matrix is zero everywhere except for the diagonal
whose values are set to the given constant.
Note that sparse rows do not need to compute or store all elements
for symmetric matrices. Entries that have already appeared in
previous rows by symmetry can be left uninitialized.
*)
type symm_mat_deriv = [
  | common_mat_deriv
  | `Diag_vec of vec
  | `Diag_const of float
]

(** Derivatives of diagonal matrices.
- Vec: the derivatives of the diagonal given in a dense vector.
- Sparse_vec: matrix is zero everywhere except at those indices
along the diagonal that are mentioned in the sparse indices
argument. The element associated with such an index is stored
in the vector argument.
- Const: the derivative of the diagonal matrix is a constant.
- Factor: the derivative of the diagonal is the non-derived
diagonal matrix times the given factor (useful with exponential
functions).
*)
type diag_deriv = [
  | `Vec of vec
  | `Sparse_vec of vec * Sparse_indices.t
  | `Const of float
  | `Factor of float
]
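
(** For illustration, a few derivative values of the above types as a
covariance function might report them (a minimal sketch; the bindings are
hypothetical and not part of this interface):

{[
  (* Fully dense derivative of a 3x3 symmetric covariance matrix *)
  let d1 : symm_mat_deriv = `Dense (Mat.make 3 3 0.5)

  (* Only the diagonal changes with the hyper parameter *)
  let d2 : symm_mat_deriv = `Diag_const 2.

  (* Exponential parameterizations: derivative = original matrix times a
     scalar factor *)
  let d3 : symm_mat_deriv = `Factor 0.1

  (* Derivative of a diagonal matrix, given densely as a vector *)
  let d4 : diag_deriv = `Vec (Vec.make 42 1.)
]}
*)
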
(** Specifications of covariance functions (= kernels) and their derivatives *)
module Specs = struct

(** Signature of kernels and their parameters *)
module type Kernel = sig
(** Type of kernel *)
type t

(** Type of kernel parameters *)
type params

(** [create params] @return kernel given parameters [params]. *)
val create : params -> t

(** [get_params kernel] @return parameters used to parameterize [kernel]. *)
val get_params : t -> params
end

(** Evaluation of covariance functions *)
module type Eval = sig
(** Kernel used for evaluation *)
module Kernel : Kernel

(** Signature for evaluating inducing inputs *)
module Inducing : sig
type t

(** [get_n_points inducing] @return number of inducing points. *)
val get_n_points : t -> int

(** [calc_upper kernel inducing] @return upper triangle of
covariance matrix of [inducing] inputs given [kernel]. *)
val calc_upper : Kernel.t -> t -> mat
end

(** Signature for evaluating single inputs *)
module Input : sig
type t (** Type of input point *)

(** [eval kernel input inducing] @return (row) vector of covariance
evaluations between [input] and [inducing] inputs given
[kernel]. *)
val eval : Kernel.t -> t -> Inducing.t -> vec

(** [weighted_eval kernel input inducing ~coeffs] @return
[coeff]-weighted sum of covariances between [input] and
[inducing] inputs given [kernel]. *)
val weighted_eval : Kernel.t -> t -> Inducing.t -> coeffs:vec -> float

(** [eval_one kernel point] @return variance of [point] given [kernel]. *)
val eval_one : Kernel.t -> t -> float
end

(** Signature for evaluating multiple inputs *)
module Inputs : sig
type t (** Type of input points *)

(** [create inputs] @return inputs given an array of single [inputs]. *)
val create : Input.t array -> t

(** [get_n_points inputs] @return number of input points. *)
val get_n_points : t -> int

(** [choose_subset inputs indexes] @return subset of input
points from [inputs] having [indexes]. *)
val choose_subset : t -> Int_vec.t -> t

(** [create_inducing kernel inputs] @return inducing points
made from [inputs] and given [kernel]. *)
val create_inducing : Kernel.t -> t -> Inducing.t

(** [create_default_kernel_params inputs ~n_inducing] @return
default kernel parameters to be used with [n_inducing]
inducing points and [inputs]. *)
val create_default_kernel_params : t -> n_inducing:int -> Kernel.params

(** [calc_upper kernel inputs] @return upper triangle of
covariance matrix of [inputs] given [kernel]. *)
val calc_upper : Kernel.t -> t -> mat

(** [calc_diag kernel inputs] @return diagonal of
covariance matrix of [inputs] given [kernel]. *)
val calc_diag : Kernel.t -> t -> vec

(** [calc_cross kernel ~inputs ~inducing] @return cross-covariance
matrix of [inputs] (indexing rows) and [inducing] points
(indexing columns). *)
val calc_cross : Kernel.t -> inputs:t -> inducing:Inducing.t -> mat

(** [weighted_eval kernel ~inputs ~inducing ~coeffs] @return
vector of [coeff]-weighted sums of covariances between
[inputs] and [inducing] inputs given [kernel]. *)
val weighted_eval :
  Kernel.t -> inputs:t -> inducing:Inducing.t -> coeffs:vec -> vec
end
end
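
(** The [Kernel] signature above is deliberately small. A minimal sketch of
an implementation (hypothetical; the covariance functions shipped with this
library carry considerably more structure):

{[
  module My_kernel : Kernel with type params = float = struct
    type params = float  (* e.g. a log length scale *)
    type t = { log_theta : float }

    let create log_theta = { log_theta }
    let get_params k = k.log_theta
  end
]}
*)
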
(** Derivatives of covariance functions *)
module type Deriv = sig
(** Derivatives always require evaluation functions *)
module Eval : Eval

(** Hyper parameters that have derivatives *)
module Hyper : sig
type t (** Type of hyper parameter *)

(** [get_all kernel inducing inputs] @return array of all hyper
parameters of [kernel] and/or ([inducing]) [inputs] for which
derivatives can be computed. *)
val get_all : Eval.Kernel.t -> Eval.Inducing.t -> Eval.Inputs.t -> t array

(** [get_value kernel inducing inputs hyper] @return value of hyper
parameter [hyper] of [kernel] and/or ([inducing]) [inputs]. *)
val get_value : Eval.Kernel.t -> Eval.Inducing.t -> Eval.Inputs.t -> t -> float

(** [set_values kernel inducing inputs hypers values] @return triple
of [(kernel, inducing, inputs)] in which [hypers] have been
substituted with [values] position-wise. *)
val set_values :
  Eval.Kernel.t -> Eval.Inducing.t -> Eval.Inputs.t -> t array -> vec ->
  Eval.Kernel.t * Eval.Inducing.t * Eval.Inputs.t
end

(** Derivatives of the covariance matrix of inducing inputs *)
module Inducing : sig
type upper (** Representation of precomputed data for calculating the
upper triangle of the derivative of the covariance matrix
of inducing inputs. *)

(** [calc_shared_upper kernel inducing] @return the pair [(eval, upper)],
where [eval] is the upper triangle of the covariance matrix of
inducing inputs for [kernel], and [upper] is the precomputed data
needed for taking derivatives. *)
val calc_shared_upper : Eval.Kernel.t -> Eval.Inducing.t -> mat * upper

(** [calc_deriv_upper upper hyper] @return the derivative of the
(symmetric) covariance matrix of inducing inputs given precomputed
data [upper] and the [hyper]-variable. *)
val calc_deriv_upper : upper -> Hyper.t -> symm_mat_deriv
end

(** Derivatives of the (cross-) covariance matrix of inputs. *)
module Inputs : sig
type diag (** Representation of precomputed data for calculating the
derivative of the diagonal of the covariance matrix of
inputs. *)

type cross (** Representation of precomputed data for calculating the
derivative of the cross-covariance matrix between inputs
and inducing inputs. *)

(** [calc_shared_diag kernel inputs] @return the pair [(eval, diag)],
where [eval] is the diagonal of the covariance matrix of [inputs] for
[kernel], and [diag] is the precomputed data needed for taking
derivatives. *)
val calc_shared_diag : Eval.Kernel.t -> Eval.Inputs.t -> vec * diag

(** [calc_shared_cross kernel ~inputs ~inducing] @return the pair [(eval,
cross)], where [eval] is the cross-covariance matrix of inputs and
inducing inputs for [kernel], and [cross] is the precomputed data
needed for taking derivatives. *)
val calc_shared_cross :
  Eval.Kernel.t -> inputs:Eval.Inputs.t -> inducing:Eval.Inducing.t ->
  mat * cross

(** [calc_deriv_diag diag hyper] @return the derivative of the
diagonal of the covariance matrix of inputs given precomputed data
[diag] and the [hyper]-variable. *)
val calc_deriv_diag : diag -> Hyper.t -> diag_deriv

(** [calc_deriv_cross cross hyper] @return the derivative of the
cross-covariance matrix of the inputs and inducing inputs given
precomputed data [cross] and the [hyper]-variable. *)
val calc_deriv_cross : cross -> Hyper.t -> mat_deriv
end
end

(** Derivatives of inputs for global optimization. *)
module type Optimizer = sig
(** Derivatives always require evaluation functions *)
module Eval : Eval

(** Input parameters that have derivatives *)
module Var : sig
type t (** Type of input parameter *)
end

module Input : sig
(** [get_vars input] @return array of all input parameters for which
derivatives can be computed given [input]. *)
val get_vars : Eval.Input.t -> Var.t array

(** [get_value input var] @return value of input parameter [var] for
[input]. *)
val get_value : Eval.Input.t -> Var.t -> float

(** [set_values input vars values] @return input in which [vars] have been
substituted with [values] position-wise. *)
val set_values : Eval.Input.t -> Var.t array -> vec -> Eval.Input.t
end

module Inputs : sig
(** [get_vars inputs] @return array of all input parameters for which
derivatives can be computed given [inputs]. *)
val get_vars : Eval.Inputs.t -> Var.t array

(** [get_value inputs var] @return value of input parameter [var] for
[inputs]. *)
val get_value : Eval.Inputs.t -> Var.t -> float

(** [set_values inputs vars values] @return inputs in which [vars] have
been substituted with [values] position-wise. *)
val set_values : Eval.Inputs.t -> Var.t array -> vec -> Eval.Inputs.t
end
end
end
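
(** A sketch of how the [Deriv] hyper parameter interface composes
(hypothetical functor, not part of this library): perturbing a single hyper
parameter, e.g. as a building block for finite-difference tests.

{[
  module Perturb (D : Specs.Deriv) = struct
    let perturb kernel inducing inputs hyper ~eps =
      (* Read the current value, then substitute the shifted value *)
      let value = D.Hyper.get_value kernel inducing inputs hyper in
      D.Hyper.set_values kernel inducing inputs
        [| hyper |] (Vec.of_array [| value +. eps |])
  end
]}
*)
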
(** Signatures for learning sparse Gaussian processes with inducing inputs *)
module Sigs = struct

(** Modules for learning without derivatives of covariance functions. *)
module type Eval = sig
(** Specification of covariance function *)
module Spec : Specs.Eval

(** Evaluating inducing inputs *)
module Inducing : sig
type t (** Type of inducing inputs *)

(** [choose_n_first_inputs kernel inputs ~n_inducing] @return the first
[n_inducing] inputs in [inputs] as inducing points given [kernel]. *)
val choose_n_first_inputs :
  Spec.Kernel.t -> Spec.Inputs.t -> n_inducing:int -> Spec.Inducing.t

(** [choose_n_random_inputs ?rnd_state kernel inputs ~n_inducing] @return
[n_inducing] random inputs in [inputs] as inducing points given
[kernel] and (optional) random state [rnd_state].
@param rnd_state default = default used by the Random module
*)
val choose_n_random_inputs :
  ?rnd_state:Random.State.t -> Spec.Kernel.t -> Spec.Inputs.t ->
  n_inducing:int -> Spec.Inducing.t

(** [calc kernel inducing_points] @return inducing inputs (= precomputed
data) prepared using [inducing_points] and [kernel]. *)
val calc : Spec.Kernel.t -> Spec.Inducing.t -> t

(** [get_points inducing] @return inducing points associated with
the prepared [inducing] inputs. *)
val get_points : t -> Spec.Inducing.t
end

(** Evaluating single inputs *)
module Input : sig
type t (** Type of single input *)

(** [calc inducing point] @return input (= precomputed
data) prepared using [inducing] inputs and input [point]. *)
val calc : Inducing.t -> Spec.Input.t -> t
end

(** Evaluating (multiple) inputs *)
module Inputs : sig
type t (** Type of (multiple) inputs *)

(** [create_default_kernel points ~n_inducing] @return a default kernel given input
[points] and [n_inducing] inducing inputs. *)
val create_default_kernel : Spec.Inputs.t -> n_inducing:int -> Spec.Kernel.t

(** [calc points inducing] @return inputs (= precomputed
data) prepared using [inducing] inputs and input [points]. *)
val calc : Spec.Inputs.t -> Inducing.t -> t

(** [get_points inputs] @return points associated with
the prepared [inputs]. *)
val get_points : t -> Spec.Inputs.t
end

(** (Untrained) model - does not require targets *)
module Model : sig
type t (** Type of models *)

type co_variance_coeffs (** Type of covariance coefficients *)

(** [calc inputs ~sigma2] @return model given [inputs] and noise level
[sigma2] (= variance, i.e. squared standard deviation). *)
val calc : Inputs.t -> sigma2:float -> t

(** [update_sigma2 model sigma2] @return model by updating [model] with
new noise level [sigma2]. *)
val update_sigma2 : t -> float -> t

(** [calc_log_evidence model] @return the contribution to the log evidence
(= log marginal likelihood) of [model]. *)
val calc_log_evidence : t -> float

(** [calc_co_variance_coeffs model] @return the coefficients
required for computing posterior (co-)variances for [model]. *)
val calc_co_variance_coeffs : t -> co_variance_coeffs

(** [get_kernel model] @return the kernel associated with [model]. *)
val get_kernel : t -> Spec.Kernel.t

(** [get_sigma2 model] @return the noise level associated with [model]. *)
val get_sigma2 : t -> float

(** [get_inputs model] @return the inputs associated with [model]. *)
val get_inputs : t -> Inputs.t

(** [get_inducing model] @return the inducing inputs associated with
[model]. *)
val get_inducing : t -> Inducing.t
end

(** Trained model - requires targets *)
module Trained : sig
type t (** Type of trained models *)

(** [calc model ~targets] @return trained model given [model] and
[targets]. *)
val calc : Model.t -> targets:vec -> t

(** [calc_mean_coeffs trained] @return the vector of coefficients for
computing posterior means. *)
val calc_mean_coeffs : t -> vec

(** [calc_log_evidence trained] @return the log evidence for the trained
model (includes contribution to log evidence by underlying model). *)
val calc_log_evidence : t -> float

(** [get_model trained] @return the model associated with the [trained]
model. *)
val get_model : t -> Model.t

(** [get_targets trained] @return targets used for training [trained]. *)
val get_targets : t -> vec
end

(** Statistics derived from trained models *)
module Stats : sig
(** Type of full statistics *)
type t = {
n_samples : int; (** Number of samples used for training *)
target_variance : float; (** Variance of targets *)
sse : float; (** Sum of squared errors *)
mse : float; (** Mean squared error *)
rmse : float; (** Root mean squared error *)
smse : float; (** Standardized mean squared error *)
msll : float; (** Mean standardized log loss *)
mad : float; (** Mean absolute deviation *)
maxad : float; (** Maximum absolute deviation *)
}

(** [calc_n_samples trained] @return number of samples used for training
[trained]. *)
val calc_n_samples : Trained.t -> int

(** [calc_target_variance trained] @return variance of targets used for
training [trained]. *)
val calc_target_variance : Trained.t -> float

(** [calc_sse trained] @return the sum of squared errors of the [trained]
model. *)
val calc_sse : Trained.t -> float

(** [calc_mse trained] @return the mean squared error of the
[trained] model. *)
val calc_mse : Trained.t -> float

(** [calc_rmse trained] @return the root mean squared error
of the [trained] model. *)
val calc_rmse : Trained.t -> float

(** [calc_smse trained] @return the standardized mean squared error of the
[trained] model. This is equivalent to the mean squared error divided
by the target variance. *)
val calc_smse : Trained.t -> float

(** [calc_msll trained] @return the mean standardized log loss. This
is equivalent to subtracting the log evidence of the trained model
from the log evidence of a normal distribution fit to the targets, and
dividing the result by the number of samples. *)
val calc_msll : Trained.t -> float

(** [calc_mad trained] @return the mean absolute deviation
of the [trained] model. *)
val calc_mad : Trained.t -> float

(** [calc_maxad trained] @return the maximum absolute deviation
of the [trained] model. *)
val calc_maxad : Trained.t -> float

(** [calc trained] @return the full set of statistics associated with
the [trained] model. *)
val calc : Trained.t -> t
end
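
(** For example, given a module [GP] implementing this [Eval] signature and
a trained model (both hypothetical here), the full statistics can be
computed and printed in one call:

{[
  let report (trained : GP.Trained.t) =
    let s = GP.Stats.calc trained in
    Printf.printf "rmse: %g  smse: %g  msll: %g\n"
      s.GP.Stats.rmse s.GP.Stats.smse s.GP.Stats.msll
]}
*)
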
(** Module for making mean predictions *)
module Mean_predictor : sig
type t (** Type of mean predictors *)

(** [calc inducing_points ~coeffs] @return a mean predictor given
[inducing_points] and coefficients [coeffs]. *)
val calc : Spec.Inducing.t -> coeffs:vec -> t

(** [calc_trained trained] @return a mean predictor given the [trained]
model. *)
val calc_trained : Trained.t -> t

(** [get_inducing mean_predictor] @return inducing points associated with
[mean_predictor]. *)
val get_inducing : t -> Spec.Inducing.t

(** [get_coeffs mean_predictor] @return coefficients associated with
[mean_predictor]. *)
val get_coeffs : t -> vec
end

(** Posterior mean for a single input *)
module Mean : sig
type t (** Type of mean *)

(** [calc mean_predictor input] @return mean for [input] given
[mean_predictor]. *)
val calc : Mean_predictor.t -> Input.t -> t

(** [get mean] @return the mean as a float. *)
val get : t -> float
end

(** Posterior means for (multiple) inputs *)
module Means : sig
type t (** Type of means *)

(** [calc mean_predictor inputs] @return means for [inputs] given
[mean_predictor]. *)
val calc : Mean_predictor.t -> Inputs.t -> t

(** [get means] @return the means as a vector. *)
val get : t -> vec
end

(** Module for making (co-)variance predictions *)
module Co_variance_predictor : sig
type t (** Type of (co-)variance predictor *)

(** [calc kernel inducing_points co_variance_coeffs] @return (co-)variance
predictor given [kernel], [inducing_points], and the (co-)variance
coefficients [co_variance_coeffs]. *)
val calc : Spec.Kernel.t -> Spec.Inducing.t -> Model.co_variance_coeffs -> t

(** [calc_model model] @return (co-)variance predictor given the
(untrained) [model]. *)
val calc_model : Model.t -> t
end

(** Posterior variance for a single input *)
module Variance : sig
type t (** Type of variance *)

(** [calc co_variance_predictor ~sigma2 input] @return variance for
[input] given [co_variance_predictor] and noise level [sigma2]. *)
val calc : Co_variance_predictor.t -> sigma2:float -> Input.t -> t

(** [get ?predictive variance] @return the [variance] as a float.
If [predictive] is [true], then the noise level will be added.
@param predictive default = [true]
*)
val get : ?predictive:bool -> t -> float
end

(** Posterior variances for (multiple) inputs *)
module Variances : sig
type t (** Type of variances *)

(** [calc_model_inputs model] @return variances for all inputs used in
[model]. *)
val calc_model_inputs : Model.t -> t

(** [calc co_variance_predictor ~sigma2 inputs] @return variances for
[inputs] given [co_variance_predictor] and noise level [sigma2]. *)
val calc : Co_variance_predictor.t -> sigma2:float -> Inputs.t -> t

(** [get ?predictive variances] @return the [variances] as a vector.
If [predictive] is [true], then the noise level will be added.
@param predictive default = [true]
*)
val get : ?predictive:bool -> t -> vec
end
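
(** Taken together, mean and variance prediction for fresh inputs might look
as follows (a sketch, assuming a module [GP] implementing this signature
and a trained model):

{[
  let predict trained inputs =
    let model = GP.Trained.get_model trained in
    let mean_predictor = GP.Mean_predictor.calc_trained trained in
    let cv_predictor = GP.Co_variance_predictor.calc_model model in
    let sigma2 = GP.Model.get_sigma2 model in
    let means = GP.Means.calc mean_predictor inputs in
    let variances = GP.Variances.calc cv_predictor ~sigma2 inputs in
    GP.Means.get means, GP.Variances.get ~predictive:true variances
]}
*)
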
(** Posterior covariances *)
module Covariances : sig
type t (** Type of covariances *)

(** [calc_model_inputs model] @return covariances for all inputs used in
[model]. This may be extremely expensive (O(N^2)) for large numbers
of model inputs. *)
val calc_model_inputs : Model.t -> t

(** [calc co_variance_predictor ~sigma2 inputs] @return posterior
covariances for [inputs] given [co_variance_predictor] and noise level
[sigma2]. This may be extremely expensive (O(N^2)) for large numbers
of inputs. *)
val calc : Co_variance_predictor.t -> sigma2:float -> Inputs.t -> t

(** [get ?predictive covariances] @return the [covariances] as a matrix.
If [predictive] is [true], then the noise level will be added (to the
diagonal only).
@param predictive default = [true]
*)
val get : ?predictive:bool -> t -> mat

(** [get_variances covariances] @return the variances in [covariances]. *)
val get_variances : t -> Variances.t
end

(** Module for sampling single points from the posterior distribution *)
module Sampler : sig
type t (** Type of sampler *)

(** [calc ?predictive mean variance] @return sampler given [mean] and
[variance]. If [predictive] is [true], the samples will be noisy. *)
val calc : ?predictive:bool -> Mean.t -> Variance.t -> t

(** [sample ?rng sampler] @return a sample from the posterior distribution
given [sampler] and GSL random number generator [rng].
@param rng default = GSL default
*)
val sample : ?rng:Gsl.Rng.t -> t -> float

(** [samples ?rng sampler ~n] @return [n] samples from the posterior
distribution given [sampler].
@param rng default = GSL default
*)
val samples : ?rng:Gsl.Rng.t -> t -> n:int -> vec
end
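
(** E.g., drawing noisy posterior samples at a single input (a sketch,
assuming a module [GP] implementing this signature with [mean] and
[variance] already computed):

{[
  let draws =
    let sampler = GP.Sampler.calc ~predictive:true mean variance in
    GP.Sampler.samples sampler ~n:100
]}
*)
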
(** Module for sampling (multiple) points from the posterior distribution
accounting for their covariance *)
module Cov_sampler : sig
type t (** Type of covariance sampler *)

(** [calc ?predictive means covariances] @return sampler given [means] and
[covariances]. If [predictive] is [true], the samples will be noisy. *)
val calc : ?predictive:bool -> Means.t -> Covariances.t -> t

(** [sample ?rng sampler] @return a sample vector from the posterior
distribution given [sampler] and GSL random number generator [rng].
@param rng default = GSL default
*)
val sample : ?rng:Gsl.Rng.t -> t -> vec

(** [samples ?rng sampler ~n] @return matrix of [n] sample vectors (stored
row-wise) from the posterior distribution given [sampler].
@param rng default = GSL default
*)
val samples : ?rng:Gsl.Rng.t -> t -> n:int -> mat
end
end
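
(** An end-to-end training sketch against the [Eval] signature
(hypothetical functor; [points], [targets] and [sigma2] are supplied by
the caller):

{[
  module Workflow (GP : Eval) = struct
    let train ?(n_inducing = 10) points ~sigma2 ~targets =
      let kernel = GP.Inputs.create_default_kernel points ~n_inducing in
      let inducing_points =
        GP.Inducing.choose_n_random_inputs kernel points ~n_inducing
      in
      let inducing = GP.Inducing.calc kernel inducing_points in
      let inputs = GP.Inputs.calc points inducing in
      let model = GP.Model.calc inputs ~sigma2 in
      GP.Trained.calc model ~targets
  end
]}
*)
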
(** Modules for learning with derivatives of the log evidence (evidence
maximization framework) *)
module type Deriv = sig
(** Sub-modules for learning without derivatives. *)
module Eval : Eval

(** Sub-modules for learning with derivatives. *)
module Deriv : sig
(** Specification of covariance function derivatives *)
module Spec : Specs.Deriv with module Eval = Eval.Spec

(** Module for inducing inputs with derivatives *)
module Inducing : sig
type t (** Type of inducing inputs with derivatives *)

(** [calc kernel inducing_points] @return inducing inputs with
derivative information given [kernel] and [inducing_points]. *)
val calc : Eval.Spec.Kernel.t -> Eval.Spec.Inducing.t -> t

(** [calc_eval inducing] @return inducing inputs without derivative
information. *)
val calc_eval : t -> Eval.Inducing.t
end

(** Module for inputs with derivatives *)
module Inputs : sig
type t (** Type of inputs with derivatives *)

(** [calc inducing points] @return inputs with derivative information
given [inducing] inputs and input [points]. *)
val calc : Inducing.t -> Eval.Spec.Inputs.t -> t

(** [calc_eval inputs] @return inputs without derivative information. *)
val calc_eval : t -> Eval.Inputs.t
end

(** (Untrained) model with derivative information *)
module Model : sig
(** Type of models with derivatives *)
type t

(** Type of models for general hyper parameters *)
type hyper_t

(** [calc inputs ~sigma2] @return model with derivative information
given [inputs] and noise level [sigma2]. *)
val calc : Inputs.t -> sigma2:float -> t

(** [update_sigma2 model sigma2] @return model with derivative
information by updating [model] with new noise level [sigma2]. *)
val update_sigma2 : t -> float -> t

(** [calc_eval model] @return model without derivative information given
[model]. *)
val calc_eval : t -> Eval.Model.t

(** [calc_log_evidence_sigma2 model] @return the derivative of the
log evidence of [model] with respect to the noise level (sigma2). *)
val calc_log_evidence_sigma2 : t -> float

(** [prepare_hyper model] @return the model prepared for calculating
derivatives for arbitrary hyper parameters. *)
val prepare_hyper : t -> hyper_t

(** [calc_log_evidence hyper_t hyper] @return the derivative of the log
evidence given prepared model [hyper_t] with respect to the [hyper]
variable. *)
val calc_log_evidence : hyper_t -> Spec.Hyper.t -> float
end

(** Trained model with derivative information *)
module Trained : sig
(** Type of trained models with derivatives *)
type t

(** Type of trained models for general hyper parameters *)
type hyper_t

(** [calc model ~targets] @return trained model with derivative
information given the untrained [model] and [targets]. *)
val calc : Model.t -> targets:vec -> t

(** [calc_eval trained] @return trained model without derivative
information given [trained]. *)
val calc_eval : t -> Eval.Trained.t

(** [calc_log_evidence_sigma2 trained] @return the derivative of the
log evidence for the [trained] model with respect to the noise level
(sigma2). This includes the contribution to the derivative by
[model]. *)
val calc_log_evidence_sigma2 : t -> float

(** [prepare_hyper trained] @return the trained model prepared for
calculating derivatives for arbitrary hyper parameters. *)
val prepare_hyper : t -> hyper_t

(** [calc_log_evidence hyper_t hyper] @return the derivative of the log
evidence given prepared, trained model [hyper_t] with respect to the
[hyper] variable. *)
val calc_log_evidence : hyper_t -> Spec.Hyper.t -> float
end
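
(** E.g., the gradient of the log evidence with respect to all hyper
parameters can be assembled from the above (a sketch; the function is
hypothetical):

{[
  let log_evidence_gradient trained kernel inducing inputs =
    let hypers = Spec.Hyper.get_all kernel inducing inputs in
    let hyper_trained = Trained.prepare_hyper trained in
    (* One partial derivative per hyper parameter *)
    Array.map hypers ~f:(Trained.calc_log_evidence hyper_trained)
]}
*)
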
(** Module for testing derivative code *)
module Test : sig
(** [check_deriv_hyper ?eps ?tol kernel inducing_points points hyper]
will raise [Failure] if the derivative code provided in the
specification of the covariance function given parameter [hyper],
the [kernel], [inducing_points] and input [points] exceeds the
tolerance [tol] when compared to finite differences using epsilon
[eps]. The failure exception will contain details on which
derivative matrix was incorrect and indicate the matrix element.
@param eps default = [1e-8]
@param tol default = [1e-2]
*)
val check_deriv_hyper :
  ?eps:float -> ?tol:float -> Eval.Spec.Kernel.t -> Eval.Spec.Inducing.t ->
  Eval.Spec.Inputs.t -> Spec.Hyper.t -> unit

(** [self_test ?eps ?tol kernel inducing_points points ~sigma2 ~targets
hyper] will raise [Failure] if the internal derivative code for the
log evidence given parameter [hyper], the [kernel],
[inducing_points], input [points], noise level [sigma2] and
[targets] exceeds the tolerance [tol] when compared to finite
differences using epsilon [eps].
@param eps default = [1e-8]
@param tol default = [1e-2]
*)
val self_test :
  ?eps:float -> ?tol:float -> Eval.Spec.Kernel.t -> Eval.Spec.Inducing.t ->
  Eval.Spec.Inputs.t -> sigma2:float -> targets:vec ->
  [ `Sigma2 | `Hyper of Spec.Hyper.t ] -> unit
end
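
(** E.g., checking the derivative code for every hyper parameter of a
covariance function against finite differences (a sketch; the helper is
hypothetical):

{[
  let check_all kernel inducing_points points =
    Array.iter (Spec.Hyper.get_all kernel inducing_points points)
      ~f:(fun hyper ->
        Test.check_deriv_hyper kernel inducing_points points hyper)
]}
*)
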
(** Optimization module for evidence maximization *)
module Optim : sig
(** Optimization with the GNU Scientific library (GSL) *)
module Gsl : sig
(** [Optim_exception exn] is raised when an internal exception occurs,
e.g. because GSL fails, or because a callback raised it. *)
exception Optim_exception of exn

(** [train ?step ?tol ?epsabs ?report_trained_model
?report_gradient_norm ?kernel ?sigma2 ?inducing ?n_rand_inducing
?learn_sigma2 ?hypers ~inputs ~targets ()] takes the optional
initial optimizer step size [step], the optimizer line search
tolerance [tol], the minimum gradient norm [epsabs] to achieve by
the optimizer, callbacks for reporting intermediate results
[report_trained_model] and [report_gradient_norm], an optional
[kernel], noise level [sigma2], inducing inputs [inducing], number
of randomly chosen inducing inputs [n_rand_inducing], a flag for
whether the noise level should be learnt [learn_sigma2], an array
of optional hyper parameters [hypers] which should be optimized,
and the [inputs] and [targets].
@return the trained model obtained by evidence maximization (=
type II maximum likelihood).
@param step default = [1e-1]
@param tol default = [1e-1]
@param epsabs default = [1e-1]
@param report_trained_model default = ignored
@param report_gradient_norm default = ignored
@param kernel default = default kernel computed from specification
@param sigma2 default = target variance
@param inducing default = randomly selected subset of inputs
@param n_rand_inducing default = 10% of inputs, at most 1000
@param learn_sigma2 default = [true]
@param hypers default = all hyper parameters
*)
val train :
  ?step:float -> ?tol:float -> ?epsabs:float ->
  ?report_trained_model:(iter:int -> Eval.Trained.t -> unit) ->
  ?report_gradient_norm:(iter:int -> float -> unit) ->
  ?kernel:Eval.Spec.Kernel.t -> ?sigma2:float ->
  ?inducing:Eval.Spec.Inducing.t -> ?n_rand_inducing:int ->
  ?learn_sigma2:bool -> ?hypers:Spec.Hyper.t array ->
  inputs:Eval.Spec.Inputs.t -> targets:vec -> unit -> Eval.Trained.t
end

module SGD : sig
type t

val create :
  ?tau:float -> ?eta0:float -> ?step:int ->
  ?kernel:Eval.Spec.Kernel.t -> ?sigma2:float ->
  ?inducing:Eval.Spec.Inducing.t -> ?n_rand_inducing:int ->
  ?learn_sigma2:bool -> ?hypers:Spec.Hyper.t array ->
  inputs:Eval.Spec.Inputs.t -> targets:vec -> unit -> t

val step : t -> t
val gradient_norm : t -> float
val get_trained : t -> Eval.Trained.t
val get_eta : t -> float
val get_step : t -> int
val test : ?epsabs:float -> ?max_iter:int -> ?report:(t -> unit) -> t -> t
end

module SMD : sig
type t

val create :
  ?eps:float -> ?lambda:float -> ?mu:float -> ?eta0:vec -> ?nu0:vec ->
  ?kernel:Eval.Spec.Kernel.t -> ?sigma2:float ->
  ?inducing:Eval.Spec.Inducing.t -> ?n_rand_inducing:int ->
  ?learn_sigma2:bool -> ?hypers:Spec.Hyper.t array ->
  inputs:Eval.Spec.Inputs.t -> targets:vec -> unit -> t

val step : t -> t
val gradient_norm : t -> float
val get_trained : t -> Eval.Trained.t
val get_eta : t -> vec
val get_nu : t -> vec
val test : ?epsabs:float -> ?max_iter:int -> ?report:(t -> unit) -> t -> t
end
end
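
(** E.g., evidence maximization with the GSL optimizer and progress
reporting (a sketch; [inputs] and [targets] are assumed to be in scope):

{[
  let trained =
    Optim.Gsl.train
      ~report_gradient_norm:(fun ~iter norm ->
        Printf.eprintf "iter %4d: |gradient| = %g\n%!" iter norm)
      ~inputs ~targets ()
]}
*)
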
(*
(** Online learning *)
module Online : sig
type t
val sgd :
?capacity : int ->
?eta0 : float -> ?tau : float -> Spec.Eval.Kernel.t -> t
val smd :
?capacity : int ->
?eta0 : vec -> ?mu : float -> ?lam : float -> Spec.Eval.Kernel.t -> t
val train : t -> Spec.Eval.Input.t -> target : float -> t
val calc_mean_predictor : t -> Eval.Mean_predictor.t
val calc_co_variance_predictor : t -> Eval.Co_variance_predictor.t
end
*)
end
end

(** Modules for global optimization *)
module type Optimizer = sig
(** Sub-modules for learning without derivatives. *)
module Eval : Eval

(** Sub-modules for global optimization. *)
module Optimizer : sig
module Spec : Specs.Optimizer with module Eval = Eval.Spec

type t

val create : ?max_memory:int -> Spec.Eval.Kernel.t -> t
val learn : t -> (Spec.Eval.Input.t * float) array -> t
val calc_mpi_criterion : t -> Spec.Eval.Input.t -> float
val calc_mpi_deriv : t -> Spec.Eval.Input.t
end
end
end