Source file owl_regression_generic.ml
# 1 "src/owl/optimise/owl_regression_generic.ml"
(*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2019 Liang Wang <liang.wang@cl.cam.ac.uk>
 *)

module Make (Optimise : Owl_optimise_generic_sig.Sig) = struct
  module Optimise = Optimise
  open Optimise
  open Optimise.Algodiff

  (* iterative solver for linear regression *)
  let _linear_reg bias params x y =
    let s = A.shape x in
    let l, m = s.(0), s.(1) in
    let n = A.col_num y in
    let o = if bias = true then m + 1 else m in
    let x =
      if bias = true then A.concatenate ~axis:1 [| x; A.ones [| l; 1 |] |] else x
    in
    (* initialise the matrices according to fan_in/out *)
    let r = 1. /. float_of_int o in
    let p = Arr A.(uniform ~a:(float_to_elt (-.r)) ~b:(float_to_elt r) [| o; n |]) in
    (* make the function to minimise *)
    let f w x =
      let w = Mat.reshape o n w in
      Maths.(x *@ w)
    in
    (* get the result, reshape, then return *)
    let w =
      minimise_weight params f (Maths.flatten p) (Arr x) (Arr y)
      |> snd
      |> Mat.reshape o n
      |> unpack_arr
    in
    (* with a bias column, split the weights from the intercept *)
    match bias with
    | true  -> A.split ~axis:0 [| m; 1 |] w
    | false -> [| w |]


  (* ordinary least squares; [i] toggles fitting an intercept *)
  let ols ?(i = false) x y =
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Quadratic
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* ridge regression: quadratic loss with an L2 penalty *)
  let ridge ?(i = false) ?(alpha = 0.001) x y =
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Quadratic
        ~regularisation:(Regularisation.L2norm alpha)
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* lasso: quadratic loss with an L1 penalty *)
  let lasso ?(i = false) ?(alpha = 0.001) x y =
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Quadratic
        ~regularisation:(Regularisation.L1norm alpha)
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* elastic net: mixes L1 and L2 penalties, weighted by [l1_ratio] *)
  let elastic_net ?(i = false) ?(alpha = 1.0) ?(l1_ratio = 0.5) x y =
    let a = alpha *. l1_ratio in
    let b = alpha *. (1. -. l1_ratio) /. 2. in
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Quadratic
        ~regularisation:(Regularisation.Elastic_net (a, b))
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* linear SVM: hinge loss with an L2 penalty *)
  let svm ?(i = false) ?(a = 0.001) x y =
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Hinge
        ~regularisation:(Regularisation.L2norm a)
        ~verbosity:true
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* logistic regression: cross-entropy loss *)
  let logistic ?(i = false) x y =
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Adagrad 1.)
        ~gradient:Gradient.GD
        ~loss:Loss.Cross_entropy
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    _linear_reg i params x y


  (* fit y = a * exp (-l * x) + b, starting from random parameters *)
  let exponential ?(_i = false) x y =
    let a = Owl_stats.std_uniform_rvs () in
    let l = Owl_stats.std_uniform_rvs () in
    let b = Owl_stats.std_uniform_rvs () in
    let f w x =
      let a = Mat.get w 0 0 in
      let l = Mat.get w 0 1 in
      let b = Mat.get w 0 2 in
      Maths.(a * exp (neg l * x) + b)
    in
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Const 0.1)
        ~gradient:Gradient.Newton
        ~loss:Loss.Quadratic
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        1000.
    in
    let a, l, b = A.(float_to_elt a, float_to_elt l, float_to_elt b) in
    let w =
      minimise_weight params f (Mat.of_arrays [| [| a; l; b |] |]) (Arr x) (Arr y)
      |> snd
      |> unpack_arr
    in
    A.(get w [| 0; 0 |], get w [| 0; 1 |], get w [| 0; 2 |])


  (* fit a degree-n polynomial: the design matrix stacks x^0 .. x^n *)
  let poly x y n =
    let z =
      Array.init (n + 1) (fun i -> A.(pow_scalar x (float_of_int i |> float_to_elt)))
    in
    let x = A.concatenate ~axis:1 z in
    let params =
      Params.config
        ~batch:Batch.Full
        ~learning_rate:(Learning_Rate.Const 1.)
        ~gradient:Gradient.Newton
        ~loss:Loss.Quadratic
        ~verbosity:false
        ~stopping:(Stopping.Const 1e-16)
        100.
    in
    (_linear_reg false params x y).(0)
end
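
Every fitting function above delegates to _linear_reg with a different Params.config. A minimal usage sketch follows, assuming the functor is instantiated as Owl.Regression.D (the double-precision instance that ships with Owl) and using synthetic data; the expected values are illustrative only.

(* fit a line with an intercept: y = 2x + 1 plus Gaussian noise *)
let () =
  let open Owl in
  let x = Mat.uniform 100 1 in
  let y = Mat.((x *$ 2.) +$ 1. + gaussian ~sigma:0.05 100 1) in
  (* i:true appends the bias column, so the result splits into [|w; b|] *)
  let r = Regression.D.ols ~i:true x y in
  Mat.print r.(0);  (* slope, expected near 2. *)
  Mat.print r.(1)   (* intercept, expected near 1. *)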
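
The curve-fitting helpers return differently shaped results: exponential hands back the three fitted scalars directly. A hedged sketch under the same Owl.Regression.D assumption; since the solver starts from random parameters, convergence to the true values is not guaranteed.

(* recover a, l, b from y = a * exp (-l * x) + b on noise-free data *)
let () =
  let open Owl in
  let x = Mat.uniform 100 1 in
  let y = Mat.map (fun v -> (0.5 *. exp (-.(2. *. v))) +. 1.) x in
  let a, l, b = Regression.D.exponential x y in
  Printf.printf "a=%g l=%g b=%g\n" a l b  (* expect roughly 0.5, 2., 1. *)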
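
Likewise, poly returns a single (n+1) x 1 coefficient matrix with the constant term first, because the design matrix stacks x^0 through x^n. A sketch, again assuming Owl.Regression.D:

(* fit a cubic; coefficients come back lowest degree first *)
let () =
  let open Owl in
  let x = Mat.linspace (-1.) 1. 50 |> Mat.transpose in  (* 50 x 1 inputs *)
  let y = Mat.map (fun v -> 1. +. (2. *. v) -. (3. *. (v ** 3.))) x in
  let w = Regression.D.poly x y 3 in
  Mat.print w  (* expect roughly the column [ 1.; 2.; 0.; -3. ] *)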