package torch
PyTorch bindings for OCaml
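As a quick orientation, here is a minimal sketch of what using the bindings looks like, built only from the Tensor operations that also appear in the mobilenet.ml source further down; exact entry points may vary slightly between versions of the bindings.

open Torch

let () =
  (* Build a small random tensor and apply an elementwise op, in the same
     Tensor.(...) pipeline style used throughout the vision models. *)
  let xs = Tensor.randn [ 4; 2 ] in
  let ys = Tensor.(relu (xs - f 0.5)) in
  Tensor.print ys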
Sources
0.4.tar.gz
md5=9547e9e025dacd52e405ff699539c582
sha512=23fd9bef6f5f11c55171f2383a2f7ca57330511af6521a4579410e002d8667a91e764aecc2deb1cf8d7bf3b0e988cd3020850fa4a5a1ec713dfb110ec7352892
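To depend on the package from a dune project, a typical setup (a sketch assuming the standard opam/dune workflow, i.e. the package installed via opam; the vision models such as mobilenet.ml below ship in the torch.vision sublibrary) would be:

(executable
 (name main)
 (libraries torch torch.vision))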
Source file mobilenet.ml
(* MobileNetV2-style architecture built from inverted-residual blocks. *)
open Base
open Torch

let sub = Var_store.sub

(* ReLU6 activation: min(max(x, 0), 6), used throughout MobileNetV2. *)
let relu6 xs = Tensor.(min (relu xs) (f 6.))

(* A convolution followed by batch-norm; [ksize] selects a 1x1 kernel or a
   padded 3x3 kernel. [sub_vs 0] and [sub_vs 1] address the conv and
   batch-norm parameters within the variable store. *)
let conv_bn sub_vs ~ksize ~stride ~input_dim output_dim =
  let ksize, padding =
    match ksize with
    | `k1 -> 1, 0
    | `k3 -> 3, 1
  in
  let conv =
    Layer.conv2d_ (sub_vs 0) ~ksize ~stride ~padding ~use_bias:false ~input_dim output_dim
  in
  let bn = Layer.batch_norm2d (sub_vs 1) output_dim in
  Layer.of_fn_ (fun xs ~is_training ->
    Layer.apply conv xs |> Layer.apply_ bn ~is_training)

(* The MobileNetV2 inverted-residual block: an optional 1x1 expansion conv,
   a 3x3 conv, then a 1x1 projection conv, with a skip connection whenever
   the input and output shapes match. *)
let inverted_residual vs ~stride ~expand_ratio ~input_dim output_dim =
  let sub_vs ~base i = sub vs (Int.to_string (base + i)) in
  let hidden_dim = input_dim * expand_ratio in
  let use_residual = input_dim = output_dim && stride = 1 in
  let conv0 =
    if expand_ratio = 1
    then Layer.id_
    else
      Layer.fold_
        [ conv_bn (sub_vs ~base:0) ~ksize:`k1 ~stride:1 ~input_dim hidden_dim
        ; Layer.of_fn_ (fun xs ~is_training:_ -> relu6 xs)
        ]
  in
  let base = if expand_ratio = 1 then 0 else 3 in
  let conv1 = conv_bn (sub_vs ~base) ~ksize:`k3 ~stride ~input_dim:hidden_dim hidden_dim in
  let conv2 =
    conv_bn (sub_vs ~base:(base + 3)) ~ksize:`k1 ~stride:1 ~input_dim:hidden_dim output_dim
  in
  Layer.of_fn_ (fun xs ~is_training ->
    Layer.apply_ conv0 xs ~is_training
    |> Layer.apply_ conv1 ~is_training
    |> relu6
    |> Layer.apply_ conv2 ~is_training
    |> fun ys -> if use_residual then Tensor.(xs + ys) else ys)

let v2 vs ~num_classes =
  let input_dim = 32 in
  let vs_features = sub vs "features" in
  let sub_vs d i = Var_store.(vs_features / Int.to_string d / Int.to_string i) in
  let initial_conv = conv_bn (sub_vs 0) ~ksize:`k3 ~stride:2 ~input_dim:3 input_dim in
  let last_dim, layers =
    let layer_idx = ref 0 in
    (* Each row gives the expansion factor t, output channels c, number of
       repeats n, and stride s of one stage of inverted-residual blocks. *)
    [ 1, 16, 1, 1
    ; 6, 24, 2, 2
    ; 6, 32, 3, 2
    ; 6, 64, 4, 2
    ; 6, 96, 3, 1
    ; 6, 160, 3, 2
    ; 6, 320, 1, 1
    ]
    |> List.fold_map ~init:input_dim ~f:(fun input_dim (t, c, nn, s) ->
      let layer =
        List.init nn ~f:(fun idx ->
          Int.incr layer_idx;
          let sub_vs = Var_store.(vs_features / Int.to_string !layer_idx / "conv") in
          (* Only the first block of a stage changes channel count and stride. *)
          let input_dim, stride = if idx = 0 then input_dim, s else c, 1 in
          inverted_residual sub_vs ~stride ~expand_ratio:t ~input_dim c)
        |> Layer.fold_
      in
      c, layer)
  in
  let layers = Layer.fold_ layers in
  let final_conv = conv_bn (sub_vs 18) ~ksize:`k1 ~stride:1 ~input_dim:last_dim last_dim in
  let final_linear =
    Layer.linear (sub (sub vs "classifier") "1") ~input_dim:last_dim num_classes
  in
  Layer.of_fn_ (fun xs ~is_training ->
    let batch_size = Tensor.shape xs |> List.hd_exn in
    Layer.apply_ initial_conv xs ~is_training
    |> relu6
    |> Layer.apply_ layers ~is_training
    |> Layer.apply_ final_conv ~is_training
    |> relu6
    |> Tensor.dropout ~p:0.2 ~is_training
    |> Tensor.view ~size:[ batch_size; last_dim ]
    |> Layer.apply final_linear)
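A usage sketch for the model above. It assumes this module is reachable as Mobilenet (it may need qualifying through the torch.vision library) and that Var_store and Device follow the API used in the source. Note that the head as written flattens the final feature map directly, so the input below is sized so that the stride-32 backbone reduces it to 1x1; larger inputs would need a pooling step before the view.

open Base
open Torch

let () =
  (* Fresh variable store holding the model parameters (randomly initialised). *)
  let vs = Var_store.create ~name:"mobilenet" ~device:Device.Cpu () in
  let model = Mobilenet.v2 vs ~num_classes:1000 in
  (* Dummy batch of two 32x32 RGB images; the backbone's total stride of 32
     collapses the spatial dimensions to 1x1 before the classifier head. *)
  let xs = Tensor.randn [ 2; 3; 32; 32 ] in
  let logits = Layer.apply_ model xs ~is_training:false in
  Tensor.shape logits
  |> List.map ~f:Int.to_string
  |> String.concat ~sep:"x"
  |> Stdio.printf "logits shape: %s\n"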