Organisations › ahrefs › ocannl › 77b2ea (lint-fmt)

(lint-fmt)

Link Copied
Code Copied

Logs

2025-12-11 12:35.00: New job: test ahrefs/ocannl https://github.com/ahrefs/ocannl.git#refs/heads/master (77b2ea7e4d82e46e825b57eba2c90f1ebdce76f2) (linux-x86_64:(lint-fmt))
Base: ocaml/opam:debian-13-ocaml-4.08@sha256:e3cc4e8fe5c00f48c72a719e3551b1d8a51c2862349a0f7507e8aa29fdf72321
ocamlformat version: version 0.28.1 (from opam)


To reproduce locally:


git clone --recursive "https://github.com/ahrefs/ocannl.git" -b "master" && cd "ocannl" && git reset --hard 77b2ea7e
cat > Dockerfile <<'END-OF-DOCKERFILE'
FROM ocaml/opam:debian-13-ocaml-4.08@sha256:e3cc4e8fe5c00f48c72a719e3551b1d8a51c2862349a0f7507e8aa29fdf72321
USER 1000:1000
RUN cd ~/opam-repository && (git cat-file -e 6c1b38620288b5bf349067f089a7b1fc91185d94 || git fetch origin master) && git reset -q --hard 6c1b38620288b5bf349067f089a7b1fc91185d94 && git log --no-decorate -n1 --oneline && opam update -u
RUN opam depext -i dune
WORKDIR /src
RUN opam depext -i ocamlformat=0.28.1
COPY --chown=1000:1000 . /src/
RUN opam exec -- dune build @fmt --ignore-promoted-rules || (echo "dune build @fmt failed"; exit 2)


END-OF-DOCKERFILE
docker build .
END-REPRO-BLOCK


2025-12-11 12:35.00: Using cache hint "ahrefs/ocannl-ocaml/opam:debian-13-ocaml-4.08@sha256:e3cc4e8fe5c00f48c72a719e3551b1d8a51c2862349a0f7507e8aa29fdf72321-debian-13-4.08_opam-2.4-ocamlformat-6c1b38620288b5bf349067f089a7b1fc91185d94"
2025-12-11 12:35.00: Using OBuilder spec:
((from ocaml/opam:debian-13-ocaml-4.08@sha256:e3cc4e8fe5c00f48c72a719e3551b1d8a51c2862349a0f7507e8aa29fdf72321)
(user (uid 1000) (gid 1000))
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "cd ~/opam-repository && (git cat-file -e 6c1b38620288b5bf349067f089a7b1fc91185d94 || git fetch origin master) && git reset -q --hard 6c1b38620288b5bf349067f089a7b1fc91185d94 && git log --no-decorate -n1 --oneline && opam update -u"))
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i dune"))
(workdir /src)
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i ocamlformat=0.28.1"))
(copy (src .) (dst /src/))
(run (shell "opam exec -- dune build @fmt --ignore-promoted-rules || (echo \"dune build @fmt failed\"; exit 2)"))
)


2025-12-11 12:35.00: Waiting for resource in pool OCluster
2025-12-11 12:35.00: Waiting for worker…
2025-12-11 12:38.55: Got resource from pool OCluster
Building on bremusa.ocamllabs.io
All commits already cached
HEAD is now at 77b2ea7e Document projection slot detection by naming convention in ppx_cd


(from ocaml/opam:debian-13-ocaml-4.08@sha256:e3cc4e8fe5c00f48c72a719e3551b1d8a51c2862349a0f7507e8aa29fdf72321)
2025-12-11 12:38.57 ---> using "d458486dd7823c592e7ea9c88366c5f90e1939c3b51f3abbd6760272096f8a3e" from cache


/: (user (uid 1000) (gid 1000))


/: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "cd ~/opam-repository && (git cat-file -e 6c1b38620288b5bf349067f089a7b1fc91185d94 || git fetch origin master) && git reset -q --hard 6c1b38620288b5bf349067f089a7b1fc91185d94 && git log --no-decorate -n1 --oneline && opam update -u"))
6c1b386202 Merge pull request #28774 from Julow/release-ocamlformat-0.28.1


<><> Updating package repositories ><><><><><><><><><><><><><><><><><><><><><><>
[default] Initialised
default (at git+file:///home/opam/opam-repository):
[INFO] opam 2.1 and 2.2 include many performance and security improvements over 2.0; please consider upgrading (https://opam.ocaml.org/doc/Install.html)


Everything as up-to-date as possible (run with --verbose to show unavailable upgrades).
However, you may "opam upgrade" these packages explicitly, which will ask permission to downgrade or uninstall the conflicting packages.
Nothing to do.
# Run eval $(opam env) to update the current shell environment
2025-12-11 12:38.57 ---> using "a7d3c7d9f6aff7dc059c465a33e7ef3fda4b4a1ee9c79bef8645b5cd4da72b96" from cache


/: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i dune"))
# Detecting depexts using vars: arch=x86_64, os=linux, os-distribution=debian, os-family=debian
# No extra OS packages requirements found.
# All required OS packages found.
# Now letting opam install the packages
The following actions will be performed:
- install dune 3.20.2


<><> Gathering sources ><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
[dune.3.20.2] found in cache


<><> Processing actions <><><><><><><><><><><><><><><><><><><><><><><><><><><><>
-> installed dune.3.20.2
Done.
# Run eval $(opam env) to update the current shell environment
2025-12-11 12:38.57 ---> using "b8799a0f87a66bd49a9341889a0027044c03db80ad17a5edb3adaf72f166d8fd" from cache


/: (workdir /src)


/src: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i ocamlformat=0.28.1"))
# Detecting depexts using vars: arch=x86_64, os=linux, os-distribution=debian, os-family=debian
# No extra OS packages requirements found.
# All required OS packages found.
# Now letting opam install the packages
The following actions will be performed:
- install sexplib0          v0.14.0  [required by base]
- install ocamlbuild        0.16.1   [required by fpath, astring, uuseg]
- install either            1.0.0    [required by ocamlformat-lib]
- install menhirLib         20250912 [required by ocamlformat-lib]
- install csexp             1.5.2    [required by ocamlformat]
- install camlp-streams     5.0.1    [required by ocamlformat-lib]
- install seq               base     [required by re]
- install menhirSdk         20250912 [required by ocamlformat-lib]
- install fix               20250919 [required by ocamlformat-lib]
- install menhirCST         20250912 [required by menhir]
- install ocamlfind         1.9.8    [required by ocp-indent, astring, fpath, uuseg]
- install dune-build-info   3.20.2   [required by ocamlformat-lib]
- install cmdliner          2.0.0    [required by ocamlformat]
- install ocaml-version     4.0.3    [required by ocamlformat-lib]
- install dune-configurator 3.20.2   [required by base]
- install re                1.11.0   [required by ocamlformat]
- install menhir            20250912 [required by ocamlformat-lib]
- install topkg             1.1.1    [required by fpath, astring, uuseg]
- install ocp-indent        1.9.0    [required by ocamlformat-lib]
- install base              v0.14.3  [required by ocamlformat-lib]
- install uutf              1.0.4    [required by ocamlformat-lib]
- install astring           0.8.5    [required by ocamlformat-lib]
- install stdio             v0.14.0  [required by ocamlformat-lib]
- install uucp              15.0.0   [required by uuseg]
- install fpath             0.7.3    [required by ocamlformat-lib]
- install uuseg             15.0.0   [required by ocamlformat-lib]
- install ocamlformat-lib   0.28.1   [required by ocamlformat]
- install ocamlformat       0.28.1
===== 28 to install =====


<><> Gathering sources ><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
[astring.0.8.5] found in cache
[base.v0.14.3] found in cache
[camlp-streams.5.0.1] found in cache
[cmdliner.2.0.0] found in cache
[csexp.1.5.2] found in cache
[dune-build-info.3.20.2] found in cache
[dune-configurator.3.20.2] found in cache
[either.1.0.0] found in cache
[fix.20250919] found in cache
[fpath.0.7.3] found in cache
[menhir.20250912] found in cache
[menhirCST.20250912] found in cache
[menhirLib.20250912] found in cache
[menhirSdk.20250912] found in cache
[ocaml-version.4.0.3] found in cache
[ocamlbuild.0.16.1] found in cache
[ocamlfind.1.9.8] found in cache
[ocamlformat.0.28.1] found in cache
[ocamlformat-lib.0.28.1] found in cache
[ocp-indent.1.9.0] found in cache
[re.1.11.0] found in cache
[sexplib0.v0.14.0] found in cache
[stdio.v0.14.0] found in cache
[topkg.1.1.1] found in cache
[uucp.15.0.0] found in cache
[uuseg.15.0.0] found in cache
[uutf.1.0.4] found in cache


<><> Processing actions <><><><><><><><><><><><><><><><><><><><><><><><><><><><>
-> installed seq.base
-> installed camlp-streams.5.0.1
-> installed csexp.1.5.2
-> installed either.1.0.0
-> installed fix.20250919
-> installed menhirCST.20250912
-> installed menhirLib.20250912
-> installed menhirSdk.20250912
-> installed ocaml-version.4.0.3
-> installed sexplib0.v0.14.0
-> installed cmdliner.2.0.0
-> installed re.1.11.0
-> installed dune-build-info.3.20.2
-> installed dune-configurator.3.20.2
-> installed ocamlfind.1.9.8
-> installed ocp-indent.1.9.0
-> installed ocamlbuild.0.16.1
-> installed base.v0.14.3
-> installed topkg.1.1.1
-> installed stdio.v0.14.0
-> installed uutf.1.0.4
-> installed astring.0.8.5
-> installed menhir.20250912
-> installed fpath.0.7.3
-> installed uucp.15.0.0
-> installed uuseg.15.0.0
-> installed ocamlformat-lib.0.28.1
-> installed ocamlformat.0.28.1
Done.


<><> ocp-indent.1.9.0 installed successfully ><><><><><><><><><><><><><><><><><>
=> This package requires additional configuration for use in editors. Install package 'user-setup', or manually:


* for Emacs, add these lines to ~/.emacs:
(add-to-list 'load-path "/home/opam/.opam/4.08/share/emacs/site-lisp")
(require 'ocp-indent)


* for Vim, add this line to ~/.vimrc:
set rtp^="/home/opam/.opam/4.08/share/ocp-indent/vim"
# Run eval $(opam env) to update the current shell environment
2025-12-11 12:38.57 ---> using "7b71439e6ac9917292b28dc59ecc075d01d03dd2ef11c372809a6cf99e594a22" from cache


/src: (copy (src .) (dst /src/))
2025-12-11 12:38.58 ---> saved as "20879b91aeb708c93abf36755f39e4eb78437131a8bcaa5fd6692b2fbd36d7e6"


/src: (run (shell "opam exec -- dune build @fmt --ignore-promoted-rules || (echo \"dune build @fmt failed\"; exit 2)"))
Warning: Invalid documentation comment:
File "tensor/einsum_types.ml", line 38, characters 0-0:
End of text is not allowed in '[...]' (code).
File "datasets/circles.ml", line 1, characters 0-0:
diff --git a/_build/default/datasets/circles.ml b/_build/default/datasets/.formatted/circles.ml
index 1c640a3..4fae3df 100644
--- a/_build/default/datasets/circles.ml
+++ b/_build/default/datasets/.formatted/circles.ml
@@ -11,21 +11,19 @@ module Config = struct
seed : int option;  (** Optional random seed for reproducibility *)
}


-  let default =
-    { image_size = 32; max_radius = 8; min_radius = 2; max_circles = 5; seed = None }
+  let default = { image_size = 32; max_radius = 8; min_radius = 2; max_circles = 5; seed = None }
end


module Random = Rand.Random_for_tests


-(** Draw a filled circle on the image at (cx, cy) with radius r.
-    Values are clamped to [0, 1] range. *)
+(** Draw a filled circle on the image at (cx, cy) with radius r. Values are clamped to [0, 1] range.
+*)
let draw_circle ~image_size image cx cy r =
for y = 0 to image_size - 1 do
for x = 0 to image_size - 1 do
let dx = x - cx in
let dy = y - cy in
-      if (dx * dx) + (dy * dy) <= r * r then
-        Genarray.set image [| y; x; 0 |] 1.0
+      if (dx * dx) + (dy * dy) <= r * r then Genarray.set image [| y; x; 0 |] 1.0
done
done


@@ -36,7 +34,8 @@ let draw_circle ~image_size image cx cy r =
@param len Number of images to generate
@return
A tuple of (images, labels) where:
-      - images is a bigarray of shape [len; image_size; image_size; 1] (batch, height, width, channels)
+      - images is a bigarray of shape [len; image_size; image_size; 1] (batch, height, width,
+        channels)
- labels is a bigarray of shape [len; 1] (batch, output) containing the circle count *)
let generate_with_kind kind ?(config = Config.default) ~len () =
(match config.seed with Some seed -> Random.init seed | None -> ());
File "tensor/einsum_types.ml", line 1, characters 0-0:
diff --git a/_build/default/tensor/einsum_types.ml b/_build/default/tensor/.formatted/einsum_types.ml
index 084d9ac..e357e66 100644
--- a/_build/default/tensor/einsum_types.ml
+++ b/_build/default/tensor/.formatted/einsum_types.ml
@@ -4,18 +4,16 @@


open Base


-(** Use_padding specification for convolutions. *)
type use_padding_spec = [ `True | `False | `Unspecified ] [@@deriving compare, sexp]
+(** Use_padding specification for convolutions. *)


-(** Convolution component for affine axis specifications.
-    Note: [dilation] is a string because it can be an identifier at parse time,
-    and is resolved to an int at runtime. *)
type conv_spec = { dilation : string; kernel_label : string; use_padding : use_padding_spec }
[@@deriving compare, sexp]
+(** Convolution component for affine axis specifications. Note: [dilation] is a string because it
+    can be an identifier at parse time, and is resolved to an int at runtime. *)


-(** Specification for individual axes in the einsum notation.
-    Note: [stride] is a string because it can be an identifier at parse time,
-    and is resolved to an int at runtime. *)
+(** Specification for individual axes in the einsum notation. Note: [stride] is a string because it
+    can be an identifier at parse time, and is resolved to an int at runtime. *)
type axis_spec =
| Label of string  (** A variable axis label. *)
| Fixed_index of int  (** A fixed index, used for projection. *)
@@ -25,8 +23,8 @@ type axis_spec =
conv : conv_spec option;  (** Optional convolution: dilation*kernel. *)
stride_offset : int;  (** Constant offset added after stride*over. *)
}
-      (** Affine axis specification: stride*over + stride_offset [+ dilation*kernel].
-          Corresponds to [Row.Affine] in shape inference. *)
+      (** Affine axis specification: stride*over + stride_offset [+ dilation*kernel]. Corresponds to
+          [Row.Affine] in shape inference. *)
[@@deriving compare, sexp]


(** An index pointing to any of a shape's axes, including the kind of the axis ([Batch, Input,
@@ -75,8 +73,8 @@ type parsed_axis_labels = {
(** The labels are strings assigned to [AxisKey] axes. Moreover the [bcast_] fields represent
whether additional leading/middle axes are allowed (corresponding to the dot-ellipsis syntax for
broadcasting). The string can be used to identify a row variable, and defaults to ["batch"],
-    ["input"], ["output"] respectively when parsing ["..."]. The [given_] fields are lists of
-    axis specs of the corresponding kind in [labels] where [from_end=true], [given_beg_] where
+    ["input"], ["output"] respectively when parsing ["..."]. The [given_] fields are lists of axis
+    specs of the corresponding kind in [labels] where [from_end=true], [given_beg_] where
[from_end=false]. *)


let axis_labels parsed = parsed.labels
File "datasets/rand.ml", line 1, characters 0-0:
diff --git a/_build/default/datasets/rand.ml b/_build/default/datasets/.formatted/rand.ml
index 22f8f7f..84ab9a6 100644
--- a/_build/default/datasets/rand.ml
+++ b/_build/default/datasets/.formatted/rand.ml
@@ -24,6 +24,7 @@ module Random_for_tests : Random = struct
(raw /. 10000. *. (high -. low)) +. low


let char () = Char.chr @@ Int32.(to_int @@ rem (rand_int32 ()) 256l)
+
let int high =
(* Use abs to handle negative random values from xor-shift RNG *)
Int32.(to_int @@ rem (abs (rand_int32 ())) @@ of_int high)
File "tensor/shape.mli", line 1, characters 0-0:
diff --git a/_build/default/tensor/shape.mli b/_build/default/tensor/.formatted/shape.mli
index d7533f0..b3e6eba 100644
--- a/_build/default/tensor/shape.mli
+++ b/_build/default/tensor/.formatted/shape.mli
@@ -49,8 +49,9 @@


Adding [<] after the output label (e.g., [stride*output<+kernel]) indicates no-padding mode,
where indices must stay within the input bounds. In this mode, the input dimension must satisfy:
-    [(input - effective_kernel_span) mod stride = 0], where [effective_kernel_span = 1 + (kernel - 1) * dilation].
-    Without [<], padding is applied and there is no such divisibility constraint.
+    [(input - effective_kernel_span) mod stride = 0], where
+    [effective_kernel_span = 1 + (kernel - 1) * dilation]. Without [<], padding is applied and there
+    is no such divisibility constraint.


Note: currently, OCANNL shapes always allow broadcasting. Row variables track the broadcasted
axes -- if there is no row variable, broadcasted axes are not tracked. In the notation case
@@ -242,9 +243,9 @@ val to_padding : t -> (Ir.Ops.axis_padding array * float) option
val propagate_shapes : update_step -> unit


val get_projections : update_step -> Ir.Indexing.projections
-(** Returns the projections for this update step, computing them if not already done.
-    This triggers [finish_inference] and then retrieves the projections from
-    [unsafe_projections]. Use this instead of [derive_projections] directly. *)
+(** Returns the projections for this update step, computing them if not already done. This triggers
+    [finish_inference] and then retrieves the projections from [unsafe_projections]. Use this
+    instead of [derive_projections] directly. *)


val of_spec : ?deduced:deduce_within_shape -> debug_name:string -> id:int -> string -> t
val default_display_indices : t -> int array
@@ -253,5 +254,5 @@ val to_labels : t -> string array
(** Uses the matrix convention of putting the input axes last. *)


val parse_n5_layout : string -> int array
-(** Parse a N5_layout priority string (e.g., "0,1,2") into display indices.
-    Only supports integer labels (Fixed_index). *)
+(** Parse a N5_layout priority string (e.g., "0,1,2") into display indices. Only supports integer
+    labels (Fixed_index). *)
File "lib/nn_blocks.ml", line 1, characters 0-0:
diff --git a/_build/default/lib/nn_blocks.ml b/_build/default/lib/.formatted/nn_blocks.ml
index 470123e..ace55f8 100644
--- a/_build/default/lib/nn_blocks.ml
+++ b/_build/default/lib/.formatted/nn_blocks.ml
@@ -231,8 +231,9 @@ let%op transformer_with_loss ~label:_ ~model () ~train_step ~src ~tgt_input ~tgt


When [use_padding=true], there is no such restriction and output size is [input_size / stride].


-    @param out_channels Optional number of output channels. If not provided, must be inferred from
-    context (e.g., from a downstream operation that constrains the output shape). *)
+    @param out_channels
+      Optional number of output channels. If not provided, must be inferred from context (e.g., from
+      a downstream operation that constrains the output shape). *)
let%op conv2d ~label ?(kernel_size = 3) ?(stride = 1) ?(use_padding = true) ?out_channels () x =
(* Notation: kernel height (kh), kernel width (kw), input channels (ic), output channels (oc),
output height (oh), output width (ow) *)
@@ -268,7 +269,8 @@ let%op depthwise_separable_conv2d ~label ?(kernel_size = 3) ?(stride = 1) ?(use_
(** Max pooling for 2D spatial data - reduces spatial dimensions by taking maximum values.


The input spatial dimensions must satisfy: [(input_size - window_size) mod stride = 0],
-    otherwise shape inference will fail. The output size is [(input_size - window_size) / stride + 1].
+    otherwise shape inference will fail. The output size is
+    [(input_size - window_size) / stride + 1].


Note: The [<] in the einsum spec indicates no-padding mode (indices stay within bounds). *)
let%op max_pool2d ?(stride = 2) ?(window_size = 2) () x =
File "test/einsum/test_einsum_parser.ml", line 1, characters 0-0:
diff --git a/_build/default/test/einsum/test_einsum_parser.ml b/_build/default/test/einsum/.formatted/test_einsum_parser.ml
index c305169..ac88c8a 100644
--- a/_build/default/test/einsum/test_einsum_parser.ml
+++ b/_build/default/test/einsum/.formatted/test_einsum_parser.ml
@@ -12,8 +12,7 @@ let test_single_char () =
(* Test 2: With batch and input *)
let spec2 = "b|i->o" in
let labels2 = Einsum_parser.axis_labels_of_spec spec2 in
-  printf "  'b|i->o' -> batch:%d input:%d output:%d\n"
-    (List.length labels2.given_batch)
+  printf "  'b|i->o' -> batch:%d input:%d output:%d\n" (List.length labels2.given_batch)
(List.length labels2.given_input)
(List.length labels2.given_output);


@@ -21,13 +20,9 @@ let test_single_char () =
let spec3 = "ij;jk=>ik" in
let l1, l2_opt, l3 = Einsum_parser.einsum_of_spec spec3 in
let l2 = Option.value_exn l2_opt in
-  printf "  'ij;jk=>ik' -> (%d,%d);(%d,%d)=>(%d,%d)\n"
-    (List.length l1.given_input)
-    (List.length l1.given_output)
-    (List.length l2.given_input)
-    (List.length l2.given_output)
-    (List.length l3.given_input)
-    (List.length l3.given_output);
+  printf "  'ij;jk=>ik' -> (%d,%d);(%d,%d)=>(%d,%d)\n" (List.length l1.given_input)
+    (List.length l1.given_output) (List.length l2.given_input) (List.length l2.given_output)
+    (List.length l3.given_input) (List.length l3.given_output);


printf "\n"


File "test/einsum/test_conv_syntax.ml", line 1, characters 0-0:
diff --git a/_build/default/test/einsum/test_conv_syntax.ml b/_build/default/test/einsum/.formatted/test_conv_syntax.ml
index bb97681..028dbb4 100644
--- a/_build/default/test/einsum/test_conv_syntax.ml
+++ b/_build/default/test/einsum/.formatted/test_conv_syntax.ml
@@ -8,43 +8,50 @@ let test_conv_parsing () =
let spec1 = "2*o+3*k" in
let labels1 = Einsum_parser.axis_labels_of_spec spec1 in
printf "Test 1: Parsed '%s' successfully\n%!" spec1;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));


(* Test 2: Simple conv expression without coefficients (multichar - requires commas) *)
let spec2 = "o+k" in
let labels2 = Einsum_parser.axis_labels_of_spec spec2 in
printf "Test 2: Parsed '%s' successfully\n%!" spec2;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));


(* Test 3: Mixed spec with comma (multichar mode) *)
let spec3 = "a, 2*b+c" in
let labels3 = Einsum_parser.axis_labels_of_spec spec3 in
printf "Test 3: Parsed '%s' successfully\n%!" spec3;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));


(* Test 4: Conv expression with multiple identifiers (multichar - requires commas) *)
let spec4 = "i, o+k, j" in
let labels4 = Einsum_parser.axis_labels_of_spec spec4 in
printf "Test 4: Parsed '%s' successfully (multichar mode)\n%!" spec4;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));


(* Test 5: Conv expression with multi-char identifiers (multichar) *)
let spec5 = "a+bc" in
let labels5 = Einsum_parser.axis_labels_of_spec spec5 in
printf "Test 5: Parsed '%s' successfully (multichar mode)\n%!" spec5;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels5));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels5));


(* Test 6: Test in einsum notation with multichar conv *)
let spec6 = "i, j -> 2*i+j" in
let labels6 = Einsum_parser.axis_labels_of_spec spec6 in
printf "Test 6: Parsed '%s' successfully\n%!" spec6;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels6));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels6));


(* Test 7: Complex batch-input-output spec with conv *)
let spec7 = "batch|input->3*output+1*kernel," in
let labels7 = Einsum_parser.axis_labels_of_spec spec7 in
printf "Test 7: Parsed '%s' successfully\n%!" spec7;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels7));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels7));


printf "All conv syntax parsing tests passed!\n%!"


@@ -55,25 +62,29 @@ let test_strided_iteration_parsing () =
let spec1 = "2*output" in
let labels1 = Einsum_parser.axis_labels_of_spec spec1 in
printf "Test 1: Parsed strided iteration '%s' successfully\n%!" spec1;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));


(* Test 2: Strided iteration with single-char identifier (multichar mode) *)
let spec2 = "3*i" in
let labels2 = Einsum_parser.axis_labels_of_spec spec2 in
printf "Test 2: Parsed strided iteration '%s' successfully\n%!" spec2;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));


(* Test 3: Strided iteration in einsum context (multichar due to multiplication) *)
let spec3 = "input -> 2*output" in
let labels3 = Einsum_parser.axis_labels_of_spec spec3 in
printf "Test 3: Parsed einsum with strided iteration '%s' successfully\n%!" spec3;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));


(* Test 4: Mixed regular labels and strided iteration (multichar due to comma) *)
let spec4 = "regular, 3*strided" in
let labels4 = Einsum_parser.axis_labels_of_spec spec4 in
printf "Test 4: Parsed mixed labels with strided iteration '%s' successfully\n%!" spec4;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));


printf "\nAll strided iteration parsing tests completed!\n%!"


@@ -138,37 +149,43 @@ let test_use_padding_syntax () =
let spec1 = "o=+k" in
let labels1 = Einsum_parser.axis_labels_of_spec spec1 in
printf "Test 1: Parsed '%s' (use_padding=true)\n%!" spec1;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels1));


(* Test 2: use_padding=false with < syntax *)
let spec2 = "o<+k" in
let labels2 = Einsum_parser.axis_labels_of_spec spec2 in
printf "Test 2: Parsed '%s' (use_padding=false)\n%!" spec2;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels2));


(* Test 3: use_padding with stride *)
let spec3 = "2*o=+k" in
let labels3 = Einsum_parser.axis_labels_of_spec spec3 in
printf "Test 3: Parsed '%s' (stride with use_padding=true)\n%!" spec3;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels3));


(* Test 4: use_padding with dilation *)
let spec4 = "o<+3*k" in
let labels4 = Einsum_parser.axis_labels_of_spec spec4 in
printf "Test 4: Parsed '%s' (dilation with use_padding=false)\n%!" spec4;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels4));


(* Test 5: use_padding with stride and dilation *)
let spec5 = "2*o=+3*k" in
let labels5 = Einsum_parser.axis_labels_of_spec spec5 in
printf "Test 5: Parsed '%s' (stride, dilation, use_padding=true)\n%!" spec5;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels5));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels5));


(* Test 6: unspecified use_padding (legacy syntax) *)
let spec6 = "o+k" in
let labels6 = Einsum_parser.axis_labels_of_spec spec6 in
printf "Test 6: Parsed '%s' (unspecified use_padding)\n%!" spec6;
-  printf "  Structure: %s\n\n%!" (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels6));
+  printf "  Structure: %s\n\n%!"
+    (Sexp.to_string_hum (Einsum_parser.sexp_of_parsed_axis_labels labels6));


printf "All use_padding syntax tests completed!\n%!"


File "test/einsum/test_conv_padding.ml", line 1, characters 0-0:
diff --git a/_build/default/test/einsum/test_conv_padding.ml b/_build/default/test/einsum/.formatted/test_conv_padding.ml
index 2415a61..bbdc511 100644
--- a/_build/default/test/einsum/test_conv_padding.ml
+++ b/_build/default/test/einsum/.formatted/test_conv_padding.ml
@@ -128,16 +128,15 @@ let test_conv2d_stride_with_padding_backprop () =


(** Test conv2d with stride=2 and use_padding=false.


-    With stride=2 and use_padding=false, output dims are (input - kernel) / stride + 1.
-    IMPORTANT: For no-padding convolutions, (input - kernel) must be divisible by stride.
-    For 9x9 input, kernel_size=3, stride=2: (9-3)/2 + 1 = 4, so output should be 4x4. *)
+    With stride=2 and use_padding=false, output dims are (input - kernel) / stride + 1. IMPORTANT:
+    For no-padding convolutions, (input - kernel) must be divisible by stride. For 9x9 input,
+    kernel_size=3, stride=2: (9-3)/2 + 1 = 4, so output should be 4x4. *)
let test_conv2d_stride_without_padding () =
printf "Testing conv2d with stride=2 and use_padding=false...\n%!";
Tensor.unsafe_reinitialize ();


-  (* Create a 9x9 input with 1 channel - sized for stride=2, kernel=3 without padding.
-     For no-padding conv: (input - kernel) must be divisible by stride.
-     (9 - 3) = 6, 6 % 2 = 0 ✓ *)
+  (* Create a 9x9 input with 1 channel - sized for stride=2, kernel=3 without padding. For
+     no-padding conv: (input - kernel) must be divisible by stride. (9 - 3) = 6, 6 % 2 = 0 ✓ *)
let input = TDSL.range_of_shape ~output_dims:[ 9; 9; 1 ] () in


(* Apply conv2d with kernel_size=3, stride=2, use_padding=false, out_channels=4 *)
@@ -164,16 +163,15 @@ let test_conv2d_stride_without_padding () =
This tests that shape inference works correctly during backpropagation for strided convolutions
without padding.


-    IMPORTANT: For no-padding convolutions, (input - kernel) must be divisible by stride,
-    otherwise shape inference will fail with "incompatible stride" error. *)
+    IMPORTANT: For no-padding convolutions, (input - kernel) must be divisible by stride, otherwise
+    shape inference will fail with "incompatible stride" error. *)
let test_conv2d_stride_without_padding_backprop () =
printf "\nTesting backprop for conv2d with stride=2 and use_padding=false...\n%!";
Tensor.unsafe_reinitialize ();


-  (* Create a 9x9 input with 1 channel - sized for stride=2, kernel=3 without padding.
-     For no-padding conv: (input - kernel) must be divisible by stride.
-     (9 - 3) = 6, 6 % 2 = 0 ✓
-     Output size: (9 - 3) / 2 + 1 = 4, so 4x4 output. *)
+  (* Create a 9x9 input with 1 channel - sized for stride=2, kernel=3 without padding. For
+     no-padding conv: (input - kernel) must be divisible by stride. (9 - 3) = 6, 6 % 2 = 0 ✓ Output
+     size: (9 - 3) / 2 + 1 = 4, so 4x4 output. *)
let input = TDSL.range_of_shape ~output_dims:[ 9; 9; 1 ] () in


(* Apply conv2d with kernel_size=3, stride=2, use_padding=false, out_channels=4 *)
File "test/training/circles_conv.ml", line 1, characters 0-0:
diff --git a/_build/default/test/training/circles_conv.ml b/_build/default/test/training/.formatted/circles_conv.ml
index fb025c0..357625f 100644
--- a/_build/default/test/training/circles_conv.ml
+++ b/_build/default/test/training/.formatted/circles_conv.ml
@@ -1,7 +1,7 @@
(** Circle counting training test using synthetic dataset.


-    This test trains a model to classify images by the number of circles they contain.
-    Uses cross-entropy loss for classification.
+    This test trains a model to classify images by the number of circles they contain. Uses
+    cross-entropy loss for classification.


{2 Known Issues with conv2d in Training}


@@ -17,8 +17,8 @@
hidden dimension(s)" errors during SGD update compilation, as the gradient tensors cannot
determine their shapes.


-    3. {b Workaround}: Use an MLP instead - OCANNL's matrix multiplication handles
-    multi-dimensional inputs automatically without explicit flattening.
+    3. {b Workaround}: Use an MLP instead - OCANNL's matrix multiplication handles multi-dimensional
+    inputs automatically without explicit flattening.


These issues suggest that [conv2d] may need:
- An explicit [out_channels] parameter to constrain output shape
@@ -44,7 +44,8 @@ let () =
(* Configuration for circle dataset *)
let image_size = 16 in
let max_circles = 3 in
-  let num_classes = max_circles in (* Classes: 1, 2, 3 circles -> indices 0, 1, 2 *)
+  let num_classes = max_circles in
+  (* Classes: 1, 2, 3 circles -> indices 0, 1, 2 *)
let config =
Datasets.Circles.Config.
{ image_size; max_radius = 4; min_radius = 2; max_circles; seed = Some seed }
@@ -83,9 +84,10 @@ let () =
let%op batch_images = images @| batch_n in
let%op batch_labels = labels_one_hot @| batch_n in


-  (* Try lenet - this will likely fail due to conv2d shape inference issues.
-     Fallback to MLP if needed. *)
-  let use_lenet = false in (* Set to true to test lenet - currently fails *)
+  (* Try lenet - this will likely fail due to conv2d shape inference issues. Fallback to MLP if
+     needed. *)
+  let use_lenet = false in
+  (* Set to true to test lenet - currently fails *)


let logits =
if use_lenet then (
@@ -111,7 +113,6 @@ let () =
let%op sample_loss = neg (log correct_prob) in
let%op batch_loss = (sample_loss ++ "...|... => 0") /. !..batch_size in


-
(* Training setup *)
let epochs = 10 in
let total_steps = epochs * n_batches in
@@ -133,7 +134,6 @@ let () =
printf "\nStarting training for %d epochs (%d steps)...\n%!" epochs total_steps;


let open Operation.At in
-
for epoch = 1 to epochs do
let epoch_loss = ref 0. in
Train.sequential_loop (Context.bindings sgd_routine) ~f:(fun () ->
File "arrayjit/lib/indexing.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/indexing.ml b/_build/default/arrayjit/lib/.formatted/indexing.ml
index 018c9e9..0d03161 100644
--- a/_build/default/arrayjit/lib/indexing.ml
+++ b/_build/default/arrayjit/lib/.formatted/indexing.ml
@@ -139,8 +139,8 @@ type projections = {
*)
product_iterators : symbol array;
(** The product space iterators (concatentation of the relevant batch, output, input axes) for
-          iterating over the [product_space] axes, where same axes are at same array indices.
-          These may be shared; lowering creates fresh symbols for loop indices. *)
+          iterating over the [product_space] axes, where same axes are at same array indices. These
+          may be shared; lowering creates fresh symbols for loop indices. *)
project_lhs : axis_index array;
(** A projection that takes an [product_space]-bound index and produces an index into the
result of an operation. *)
File "test/einsum/test_max_pool2d.ml", line 1, characters 0-0:
diff --git a/_build/default/test/einsum/test_max_pool2d.ml b/_build/default/test/einsum/.formatted/test_max_pool2d.ml
index 8c0fd1f..99d9e1b 100644
--- a/_build/default/test/einsum/test_max_pool2d.ml
+++ b/_build/default/test/einsum/.formatted/test_max_pool2d.ml
@@ -7,8 +7,7 @@ let max_pool2d = Nn_blocks.max_pool2d


(** Test basic max_pool2d operation with default parameters.


-    Default: stride=2, window_size=2
-    For 4x4 input, output should be 2x2. *)
+    Default: stride=2, window_size=2 For 4x4 input, output should be 2x2. *)
let test_max_pool2d_basic () =
printf "Testing max_pool2d with default parameters (stride=2, window=2)...\n%!";
Tensor.unsafe_reinitialize ();
@@ -34,9 +33,8 @@ let test_max_pool2d_basic () =


(** Test max_pool2d with stride=2, window=3.


-    For 7x7 input with stride=2, window=3 (no padding):
-    Output size = (7 - 3) / 2 + 1 = 3.
-    Valid convolution requires: input = stride * (output - 1) + window = 2 * 2 + 3 = 7 *)
+    For 7x7 input with stride=2, window=3 (no padding): Output size = (7 - 3) / 2 + 1 = 3. Valid
+    convolution requires: input = stride * (output - 1) + window = 2 * 2 + 3 = 7 *)
let test_max_pool2d_window3 () =
printf "Testing max_pool2d with stride=2, window=3...\n%!";
Tensor.unsafe_reinitialize ();
@@ -61,9 +59,8 @@ let test_max_pool2d_window3 () =


(** Test max_pool2d with output dimension 1.


-    For 3x3 input with stride=2, window=3 (no padding):
-    Output size = (3 - 3) / 2 + 1 = 1.
-    This tests the edge case where the kernel exactly covers the input. *)
+    For 3x3 input with stride=2, window=3 (no padding): Output size = (3 - 3) / 2 + 1 = 1. This
+    tests the edge case where the kernel exactly covers the input. *)
let test_max_pool2d_output_dim_1 () =
printf "Testing max_pool2d with output dimension 1...\n%!";
Tensor.unsafe_reinitialize ();
@@ -118,8 +115,8 @@ let test_max_pool2d_channels () =
for backprop. The error shows dimension mismatch between input (4) and output (2) dimensions.
This is the same class of issue as strided conv2d without padding - the backpropagation shape
inference fails when output dimensions don't match input dimensions due to striding. This is
-    also the root cause of LeNet training failures. See circles_conv.ml and test_conv_padding.ml
-    for related discussion. *)
+    also the root cause of LeNet training failures. See circles_conv.ml and test_conv_padding.ml for
+    related discussion. *)
let test_max_pool2d_backprop () =
printf "\nTesting backprop for max_pool2d...\n%!";
Tensor.unsafe_reinitialize ();
File "arrayjit/lib/assignments.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/assignments.ml b/_build/default/arrayjit/lib/.formatted/assignments.ml
index 069170f..a198727 100644
--- a/_build/default/arrayjit/lib/assignments.ml
+++ b/_build/default/arrayjit/lib/.formatted/assignments.ml
@@ -153,9 +153,9 @@ let sequence l =


let%track4_sexp to_low_level code =
let open Indexing in
-  (* Apply left padding offsets to convert from semantic to buffer indices.
-     Semantic indices can be negative (e.g., -1 for convolution padding), but buffer
-     indices must be non-negative. Adding left_padding converts semantic to buffer space. *)
+  (* Apply left padding offsets to convert from semantic to buffer indices. Semantic indices can be
+     negative (e.g., -1 for convolution padding), but buffer indices must be non-negative. Adding
+     left_padding converts semantic to buffer space. *)
let apply_padding_offset (tn : Tn.t) (idcs : Indexing.axis_index array) :
Indexing.axis_index array =
match Tn.get_padding tn with
@@ -214,10 +214,10 @@ let%track4_sexp to_low_level code =
=
let projections : Indexing.projections = Lazy.force projections in
let basecase rev_iters =
-      (* Create a substitution from product iterators to loop iterators.
-         Fresh loop symbols are needed because product_iterators may be shared across
-         different operations/tensors, but each lowered operation needs private loop symbols
-         to avoid conflicts in low_level.ml's symbol-to-tensor tracking. *)
+      (* Create a substitution from product iterators to loop iterators. Fresh loop symbols are
+         needed because product_iterators may be shared across different operations/tensors, but
+         each lowered operation needs private loop symbols to avoid conflicts in low_level.ml's
+         symbol-to-tensor tracking. *)
let subst_map =
let loop_iters = Array.of_list_rev rev_iters in
Array.mapi projections.product_iterators ~f:(fun i prod_iter ->
File "tensor/ppx_shared.ml", line 1, characters 0-0:
diff --git a/_build/default/tensor/ppx_shared.ml b/_build/default/tensor/.formatted/ppx_shared.ml
index bee7b1b..7225cf0 100644
--- a/_build/default/tensor/ppx_shared.ml
+++ b/_build/default/tensor/.formatted/ppx_shared.ml
@@ -115,12 +115,12 @@ let ndarray_constant expr =


(** Convert an einsum spec string to an OCaml expression that constructs the runtime string.


-    This function parses the einsum spec using the Einsum_parser, then reconstructs a runtime
-    string expression, handling:
-    - stride and dilation values: if they look like integer literals, emit them directly;
-      otherwise emit [Int.to_string identifier] to convert at runtime
-    - use_padding: if unspecified (legacy syntax), emit [if use_padding then "=" else "<"]
-      to read the value from [Row.use_padding] at runtime
+    This function parses the einsum spec using the Einsum_parser, then reconstructs a runtime string
+    expression, handling:
+    - stride and dilation values: if they look like integer literals, emit them directly; otherwise
+      emit [Int.to_string identifier] to convert at runtime
+    - use_padding: if unspecified (legacy syntax), emit [if use_padding then "=" else "<"] to read
+      the value from [Row.use_padding] at runtime


Example: ["stride*x=+k; y => z"] where [stride] is a variable, generates an expression that
evaluates to e.g. ["2*x=+k; y => z"] if [stride = 2]. *)
@@ -216,11 +216,12 @@ let substitute_identifiers_in_einsum_spec ~loc str_input =
let output_segments =
row_to_segments ~kind:"output" parsed.bcast_output parsed.given_beg_output parsed.given_output
in
-    let has_batch = not (List.is_empty batch_segments) || Option.is_some parsed.bcast_batch in
-    let has_input = not (List.is_empty input_segments) || Option.is_some parsed.bcast_input in
+    let has_batch = (not (List.is_empty batch_segments)) || Option.is_some parsed.bcast_batch in
+    let has_input = (not (List.is_empty input_segments)) || Option.is_some parsed.bcast_input in
let segments =
if has_batch then
-        batch_segments @ [ estring ~loc "|" ]
+        batch_segments
+        @ [ estring ~loc "|" ]
@ (if has_input then input_segments @ [ estring ~loc "->" ] else [])
@ output_segments
else if has_input then input_segments @ [ estring ~loc "->" ] @ output_segments
@@ -248,33 +249,34 @@ let substitute_identifiers_in_einsum_spec ~loc str_input =
let combined =
String.concat
(List.filter_map all_segments ~f:(fun e ->
-               match e.pexp_desc with Pexp_constant (Pconst_string (s, _, _)) -> Some s | _ -> None))
+               match e.pexp_desc with
+               | Pexp_constant (Pconst_string (s, _, _)) -> Some s
+               | _ -> None))
in
estring ~loc combined
else [%expr String.concat ~sep:"" [%e elist ~loc all_segments]]
-  with Parse_error _ ->
+  with Parse_error _ -> (
(* If parsing fails, try as axis_labels_spec *)
-    (try
-       let parsed = axis_labels_of_spec str_input in
-       let segments = parsed_to_segments parsed in
-       let all_literals =
-         List.for_all segments ~f:(fun e ->
-             match e.pexp_desc with Pexp_constant (Pconst_string _) -> true | _ -> false)
-       in
-       if all_literals then
-         let combined =
-           String.concat
-             (List.filter_map segments ~f:(fun e ->
-                  match e.pexp_desc with
-                  | Pexp_constant (Pconst_string (s, _, _)) -> Some s
-                  | _ -> None))
-         in
-         estring ~loc combined
-       else [%expr String.concat ~sep:"" [%e elist ~loc segments]]
-     with Parse_error msg ->
-       (* Fall back to returning the original string with an error note *)
-       pexp_extension ~loc
-       @@ Location.error_extensionf ~loc "Failed to parse einsum spec: %s" msg)
+    try
+      let parsed = axis_labels_of_spec str_input in
+      let segments = parsed_to_segments parsed in
+      let all_literals =
+        List.for_all segments ~f:(fun e ->
+            match e.pexp_desc with Pexp_constant (Pconst_string _) -> true | _ -> false)
+      in
+      if all_literals then
+        let combined =
+          String.concat
+            (List.filter_map segments ~f:(fun e ->
+                 match e.pexp_desc with
+                 | Pexp_constant (Pconst_string (s, _, _)) -> Some s
+                 | _ -> None))
+        in
+        estring ~loc combined
+      else [%expr String.concat ~sep:"" [%e elist ~loc segments]]
+    with Parse_error msg ->
+      (* Fall back to returning the original string with an error note *)
+      pexp_extension ~loc @@ Location.error_extensionf ~loc "Failed to parse einsum spec: %s" msg)


let string_expr ~loc s = Ast_helper.Exp.constant @@ Pconst_string (s, loc, None)


@@ -546,11 +548,7 @@ let let_opt ~loc vbs expr =
(* Check for duplicates and create nested let bindings preserving definition order *)
let seen = Hashtbl.create (module String) in
List.fold_right vbs ~init:expr ~f:(fun vb acc ->
-      let name =
-        match vb.pvb_pat.ppat_desc with
-        | Ppat_var { txt; _ } -> txt
-        | _ -> "_"
-      in
+      let name = match vb.pvb_pat.ppat_desc with Ppat_var { txt; _ } -> txt | _ -> "_" in
match Hashtbl.add seen ~key:name ~data:() with
| `Ok -> Ast_helper.Exp.let_ ~loc Nonrecursive [ vb ] acc
| `Duplicate ->
@@ -565,7 +563,6 @@ let let_opt ~loc vbs expr =
Ast_helper.Exp.let_ ~loc Nonrecursive [ { vb with pvb_expr = error_expr } ] acc)


let no_vbs = []
-
let reduce_vbss vbss = List.concat vbss


let expr_expander_with_punning translate ~loc ~path:_ payload =
File "tensor/tensor.ml", line 1, characters 0-0:
diff --git a/_build/default/tensor/tensor.ml b/_build/default/tensor/.formatted/tensor.ml
index 0376308..a9f685c 100644
--- a/_build/default/tensor/tensor.ml
+++ b/_build/default/tensor/.formatted/tensor.ml
@@ -299,7 +299,8 @@ let%track7_sexp op ~(label : string list) ?(ternary_op = Shape.Pointwise_tern)
let v =
match terminal_op with
| Some (Shape.Data (Asgns.Reshape data)) ->
-        Tn.create_with_reshape ~id ~label ~unpadded_dims ~padding ~from_padded:false ~base_ndarray:data ()
+        Tn.create_with_reshape ~id ~label ~unpadded_dims ~padding ~from_padded:false
+          ~base_ndarray:data ()
| Some (Shape.Data (Asgns.Keep_shape_no_padding data)) ->
Tn.create_from_padded ~id ~label ~ndarray:data ~padding:None ()
| Some (Shape.Data (Asgns.Padded { data; padding = padding_spec; padded_value })) ->
@@ -329,8 +330,8 @@ let%track7_sexp op ~(label : string list) ?(ternary_op = Shape.Pointwise_tern)
assert false
in
let local_shape_updates =
-    List.map
-      ~f:(fun logic -> Shape.{ shape; logic; id = get_update_id (); unsafe_projections = None })
+    List.map ~f:(fun logic ->
+        Shape.{ shape; logic; id = get_update_id (); unsafe_projections = None })
@@ shape_logics orig_ts
in
List.iter ~f:Shape.propagate_shapes local_shape_updates;
File "arrayjit/lib/tnode.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/tnode.ml b/_build/default/arrayjit/lib/.formatted/tnode.ml
index 5ce96c8..4ffc148 100644
--- a/_build/default/arrayjit/lib/tnode.ml
+++ b/_build/default/arrayjit/lib/.formatted/tnode.ml
@@ -710,8 +710,8 @@ let create_with_reshape ~id ~label ~base_ndarray ~unpadded_dims ~padding ~from_p
in
Some (Nd.apply_with_prec { f = f_reshape_with_prec } base_ndarray)
| Some _, false ->
-           (* Create new bigarray with padding and copy source into non-padding parts.
-              semantic_dims are the data area dimensions (without padding). *)
+           (* Create new bigarray with padding and copy source into non-padding parts. semantic_dims
+              are the data area dimensions (without padding). *)
let target = Nd.create_array ~debug prec_val ~dims:padded_dims ~padding:target_padding in
let source_dims = Nd.dims base_ndarray in
(* Check total elements match, allowing shape differences *)
dune build @fmt failed
"/usr/bin/env" "bash" "-c" "opam exec -- dune build @fmt --ignore-promoted-rules || (echo "dune build @fmt failed"; exit 2)" failed with exit status 2
2025-12-11 12:39.03: Job failed: Failed: Build failed