Organisations › ahrefs › ocannl › 75b5fb (lint-fmt)

(lint-fmt)

Link Copied
Code Copied

Logs

2025-07-14 15:57.13: New job: test ahrefs/ocannl https://github.com/ahrefs/ocannl.git#refs/heads/master (75b5fb751ff73d57d1f00fa488c940761f07b022) (linux-x86_64:(lint-fmt))
Base: ocaml/opam:debian-12-ocaml-4.08@sha256:cc4b148b1fa1916574df02fcec0956fedbec6798bedacd9bfd4417c1c098ce8e
ocamlformat version: version 0.27.0 (from opam)


To reproduce locally:


git clone --recursive "https://github.com/ahrefs/ocannl.git" -b "master" && cd "ocannl" && git reset --hard 75b5fb75
cat > Dockerfile <<'END-OF-DOCKERFILE'
FROM ocaml/opam:debian-12-ocaml-4.08@sha256:cc4b148b1fa1916574df02fcec0956fedbec6798bedacd9bfd4417c1c098ce8e
USER 1000:1000
RUN cd ~/opam-repository && (git cat-file -e 0eea63ad71af2b1116c556023bedc6bf083e6125 || git fetch origin master) && git reset -q --hard 0eea63ad71af2b1116c556023bedc6bf083e6125 && git log --no-decorate -n1 --oneline && opam update -u
RUN opam depext -i dune
WORKDIR /src
RUN opam depext -i ocamlformat=0.27.0
COPY --chown=1000:1000 . /src/
RUN opam exec -- dune build @fmt --ignore-promoted-rules || (echo "dune build @fmt failed"; exit 2)


END-OF-DOCKERFILE
docker build .
END-REPRO-BLOCK


2025-07-14 15:57.13: Using cache hint "ahrefs/ocannl-ocaml/opam:debian-12-ocaml-4.08@sha256:cc4b148b1fa1916574df02fcec0956fedbec6798bedacd9bfd4417c1c098ce8e-debian-12-4.08_opam-2.3-ocamlformat-0eea63ad71af2b1116c556023bedc6bf083e6125"
2025-07-14 15:57.13: Using OBuilder spec:
((from ocaml/opam:debian-12-ocaml-4.08@sha256:cc4b148b1fa1916574df02fcec0956fedbec6798bedacd9bfd4417c1c098ce8e)
(user (uid 1000) (gid 1000))
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "cd ~/opam-repository && (git cat-file -e 0eea63ad71af2b1116c556023bedc6bf083e6125 || git fetch origin master) && git reset -q --hard 0eea63ad71af2b1116c556023bedc6bf083e6125 && git log --no-decorate -n1 --oneline && opam update -u"))
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i dune"))
(workdir /src)
(run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i ocamlformat=0.27.0"))
(copy (src .) (dst /src/))
(run (shell "opam exec -- dune build @fmt --ignore-promoted-rules || (echo \"dune build @fmt failed\"; exit 2)"))
)


2025-07-14 15:57.13: Waiting for resource in pool OCluster
2025-07-14 15:57.13: Waiting for worker…
2025-07-14 15:57.13: Got resource from pool OCluster
Building on toxis.caelum.ci.dev
HEAD is now at 6634fe50 Full support for padding in ndarray.ml, by Claude Sonnet
HEAD is now at 75b5fb75 Control listings in print_accessible_headers


(from ocaml/opam:debian-12-ocaml-4.08@sha256:cc4b148b1fa1916574df02fcec0956fedbec6798bedacd9bfd4417c1c098ce8e)
2025-07-14 15:57.14 ---> using "4ea5038d254cfd14663698deb665a2dc4ce1e1383d544c063adebb02ed15ce16" from cache


/: (user (uid 1000) (gid 1000))


/: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "cd ~/opam-repository && (git cat-file -e 0eea63ad71af2b1116c556023bedc6bf083e6125 || git fetch origin master) && git reset -q --hard 0eea63ad71af2b1116c556023bedc6bf083e6125 && git log --no-decorate -n1 --oneline && opam update -u"))
0eea63ad71 Merge pull request #27946 from mtelvers/opam-publish-ocaml-version.4.0.1


<><> Updating package repositories ><><><><><><><><><><><><><><><><><><><><><><>
[default] Initialised
default (at git+file:///home/opam/opam-repository):
[INFO] opam 2.1 and 2.2 include many performance and security improvements over 2.0; please consider upgrading (https://opam.ocaml.org/doc/Install.html)


Everything as up-to-date as possible (run with --verbose to show unavailable upgrades).
However, you may "opam upgrade" these packages explicitly, which will ask permission to downgrade or uninstall the conflicting packages.
Nothing to do.
# Run eval $(opam env) to update the current shell environment
2025-07-14 15:57.14 ---> using "ed14c208b709d50e26e290f8f669e0b0b6a2456751ee5b87b29d71221f0a9fc1" from cache


/: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i dune"))
# Detecting depexts using vars: arch=x86_64, os=linux, os-distribution=debian, os-family=debian
# No extra OS packages requirements found.
# All required OS packages found.
# Now letting opam install the packages
The following actions will be performed:
- install dune 3.19.1


<><> Gathering sources ><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
[dune.3.19.1] found in cache


<><> Processing actions <><><><><><><><><><><><><><><><><><><><><><><><><><><><>
-> installed dune.3.19.1
Done.
# Run eval $(opam env) to update the current shell environment
2025-07-14 15:57.14 ---> using "249f8284e2625ce869f4c794eb6ff1eab40d82e0af762bb312d97ad81bf8d4f1" from cache


/: (workdir /src)


/src: (run (cache (opam-archives (target /home/opam/.opam/download-cache)))
(network host)
(shell "opam depext -i ocamlformat=0.27.0"))
# Detecting depexts using vars: arch=x86_64, os=linux, os-distribution=debian, os-family=debian
# No extra OS packages requirements found.
# All required OS packages found.
# Now letting opam install the packages
The following actions will be performed:
- install sexplib0          v0.14.0  [required by base]
- install cmdliner          1.3.0    [required by ocamlformat]
- install menhirLib         20240715 [required by ocamlformat-lib]
- install menhirCST         20240715 [required by menhir]
- install ocamlbuild        0.16.1   [required by fpath, astring, uuseg]
- install dune-build-info   3.19.1   [required by ocamlformat-lib]
- install menhirSdk         20240715 [required by ocamlformat-lib]
- install either            1.0.0    [required by ocamlformat-lib]
- install ocaml-version     4.0.1    [required by ocamlformat-lib]
- install camlp-streams     5.0.1    [required by ocamlformat-lib]
- install csexp             1.5.2    [required by ocamlformat]
- install seq               base     [required by re]
- install fix               20250428 [required by ocamlformat-lib]
- install ocamlfind         1.9.8    [required by ocp-indent, astring, fpath, uuseg]
- install menhir            20240715 [required by ocamlformat-lib]
- install dune-configurator 3.19.1   [required by base]
- install re                1.11.0   [required by ocamlformat]
- install topkg             1.0.8    [required by fpath, astring, uuseg]
- install base-bytes        base     [required by ocp-indent]
- install base              v0.14.3  [required by ocamlformat-lib]
- install uutf              1.0.4    [required by ocamlformat-lib]
- install astring           0.8.5    [required by ocamlformat-lib]
- install ocp-indent        1.8.1    [required by ocamlformat-lib]
- install stdio             v0.14.0  [required by ocamlformat-lib]
- install uucp              15.0.0   [required by uuseg]
- install fpath             0.7.3    [required by ocamlformat-lib]
- install uuseg             15.0.0   [required by ocamlformat-lib]
- install ocamlformat-lib   0.27.0   [required by ocamlformat]
- install ocamlformat       0.27.0
===== 29 to install =====


<><> Gathering sources ><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
[astring.0.8.5] found in cache
[base.v0.14.3] found in cache
[camlp-streams.5.0.1] found in cache
[cmdliner.1.3.0] found in cache
[csexp.1.5.2] found in cache
[dune-build-info.3.19.1] found in cache
[dune-configurator.3.19.1] found in cache
[either.1.0.0] found in cache
[fix.20250428] found in cache
[fpath.0.7.3] found in cache
[menhir.20240715] found in cache
[menhirCST.20240715] found in cache
[menhirLib.20240715] found in cache
[menhirSdk.20240715] found in cache
[ocaml-version.4.0.1] found in cache
[ocamlbuild.0.16.1] found in cache
[ocamlfind.1.9.8] found in cache
[ocamlformat.0.27.0] found in cache
[ocamlformat-lib.0.27.0] found in cache
[ocp-indent.1.8.1] found in cache
[re.1.11.0] found in cache
[sexplib0.v0.14.0] found in cache
[stdio.v0.14.0] found in cache
[topkg.1.0.8] found in cache
[uucp.15.0.0] found in cache
[uuseg.15.0.0] found in cache
[uutf.1.0.4] found in cache


<><> Processing actions <><><><><><><><><><><><><><><><><><><><><><><><><><><><>
-> installed seq.base
-> installed camlp-streams.5.0.1
-> installed csexp.1.5.2
-> installed either.1.0.0
-> installed fix.20250428
-> installed cmdliner.1.3.0
-> installed menhirCST.20240715
-> installed menhirLib.20240715
-> installed menhirSdk.20240715
-> installed ocaml-version.4.0.1
-> installed re.1.11.0
-> installed sexplib0.v0.14.0
-> installed dune-build-info.3.19.1
-> installed dune-configurator.3.19.1
-> installed ocamlfind.1.9.8
-> installed base-bytes.base
-> installed ocp-indent.1.8.1
-> installed ocamlbuild.0.16.1
-> installed base.v0.14.3
-> installed stdio.v0.14.0
-> installed topkg.1.0.8
-> installed uutf.1.0.4
-> installed astring.0.8.5
-> installed fpath.0.7.3
-> installed menhir.20240715
-> installed uucp.15.0.0
-> installed uuseg.15.0.0
-> installed ocamlformat-lib.0.27.0
-> installed ocamlformat.0.27.0
Done.


<><> ocp-indent.1.8.1 installed successfully ><><><><><><><><><><><><><><><><><>
=> This package requires additional configuration for use in editors. Install package 'user-setup', or manually:


* for Emacs, add these lines to ~/.emacs:
(add-to-list 'load-path "/home/opam/.opam/4.08/share/emacs/site-lisp")
(require 'ocp-indent)


* for Vim, add this line to ~/.vimrc:
set rtp^="/home/opam/.opam/4.08/share/ocp-indent/vim"
# Run eval $(opam env) to update the current shell environment
2025-07-14 15:57.14 ---> using "7fd5d4ac1bb4f93065934b08bd6aad3989793fd62a884f257d9f7ac1e147ec1a" from cache


/src: (copy (src .) (dst /src/))
2025-07-14 15:57.14 ---> saved as "c8783cd319d8cf96d7c9fac685cc43de5c2074b01c422eeeb45b5eb1c5fb84d7"


/src: (run (shell "opam exec -- dune build @fmt --ignore-promoted-rules || (echo \"dune build @fmt failed\"; exit 2)"))
File "arrayjit/bin/dune", line 6, characters 30-43:
6 |   (pps ppx_here ppx_minidebug ppx_sexp_conv))
^^^^^^^^^^^^^
Error: Library "ppx_sexp_conv" not found.
-> required by _build/default/arrayjit/bin/read_config.exe
-> required by %{dep:../../../arrayjit/bin/read_config.exe} at
test/operations/dune:17
-> required by _build/default/test/operations/config/ocannl_backend.txt
-> required by %{read:config/ocannl_backend.txt} at test/operations/dune:33
-> required by Computing directory contents of _build/default/test/operations
File "arrayjit/bin/dune", line 6, characters 7-15:
6 |   (pps ppx_here ppx_minidebug ppx_sexp_conv))
^^^^^^^^
Error: Library "ppx_here" not found.
-> required by _build/default/arrayjit/bin/.merlin-conf/exe-read_config
-> required by _build/default/arrayjit/bin/read_config.exe
-> required by %{dep:../../../arrayjit/bin/read_config.exe} at
test/operations/dune:17
-> required by _build/default/test/operations/config/ocannl_backend.txt
-> required by %{read:config/ocannl_backend.txt} at test/operations/dune:33
-> required by Computing directory contents of _build/default/test/operations
File "lib/row.mli", line 1, characters 0-0:
diff --git a/_build/default/lib/row.mli b/_build/default/lib/.formatted/row.mli
index b57358a..7d504b5 100644
--- a/_build/default/lib/row.mli
+++ b/_build/default/lib/.formatted/row.mli
@@ -86,9 +86,9 @@ type total_elems =
type row_constraint =
| Unconstrained
| Total_elems of { numerator : total_elems; divided_by : dim_var list }
-      (** The rows, inclusive of the further row spec, have this many elements.
-          The total is numerator / (product of divided_by variables).
-          divided_by has multiset semantics - the same variable can appear multiple times. *)
+      (** The rows, inclusive of the further row spec, have this many elements. The total is
+          numerator / (product of divided_by variables). divided_by has multiset semantics - the
+          same variable can appear multiple times. *)
| Exact of dim list  (** The concatenated rows have these axes. *)
[@@deriving equal, hash, compare, sexp_of]


File "datasets/half_moons.ml", line 1, characters 0-0:
diff --git a/_build/default/datasets/half_moons.ml b/_build/default/datasets/.formatted/half_moons.ml
index cd6bcd3..b40461a 100644
--- a/_build/default/datasets/half_moons.ml
+++ b/_build/default/datasets/.formatted/half_moons.ml
@@ -6,38 +6,33 @@ open Bigarray
module Config = struct
type t = {
noise_range : float;  (** Range of noise to add to the coordinates *)
-    seed : int option;    (** Optional random seed for reproducibility *)
+    seed : int option;  (** Optional random seed for reproducibility *)
}


-  let default = {
-    noise_range = 0.1;
-    seed = None;
-  }
+  let default = { noise_range = 0.1; seed = None }
end


(** Internal helper function to generate half moons with specified precision.
-
+
@param kind The bigarray kind (float32 or float64)
@param config Configuration for noise and randomization
@param len Number of samples per moon (total samples = len * 2)
-    @return A tuple of (coordinates, labels) where:
-            - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis)
-            - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis)
-            - First moon has label 1.0, second moon has label -1.0
-*)
+    @return
+      A tuple of (coordinates, labels) where:
+      - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis)
+      - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis)
+      - First moon has label 1.0, second moon has label -1.0 *)
let generate_with_kind kind ?(config = Config.default) ~len () =
(* Initialize random seed if specified *)
-  (match config.seed with
-  | Some seed -> Random.init seed
-  | None -> ());
+  (match config.seed with Some seed -> Random.init seed | None -> ());


let noise () = Random.float (2.0 *. config.noise_range) -. config.noise_range in
let total_samples = len * 2 in
-
+
(* Create bigarrays with batch axis first, then output axis *)
let coordinates = Genarray.create kind c_layout [| total_samples; 2 |] in
let labels = Genarray.create kind c_layout [| total_samples; 1 |] in
-
+
(* Generate first moon (label = 1.0) *)
for i = 0 to len - 1 do
let v = Float.of_int i *. Float.pi /. Float.of_int len in
@@ -46,9 +41,9 @@ let generate_with_kind kind ?(config = Config.default) ~len () =
let y = s +. noise () in
Genarray.set coordinates [| i; 0 |] x;
Genarray.set coordinates [| i; 1 |] y;
-    Genarray.set labels [| i; 0 |] 1.0;
+    Genarray.set labels [| i; 0 |] 1.0
done;
-
+
(* Generate second moon (label = -1.0) *)
for i = 0 to len - 1 do
let v = Float.of_int i *. Float.pi /. Float.of_int len in
@@ -58,50 +53,51 @@ let generate_with_kind kind ?(config = Config.default) ~len () =
let idx = len + i in
Genarray.set coordinates [| idx; 0 |] x;
Genarray.set coordinates [| idx; 1 |] y;
-    Genarray.set labels [| idx; 0 |] (-1.0);
+    Genarray.set labels [| idx; 0 |] (-1.0)
done;
-
+
(coordinates, labels)


(** Generate the half moons dataset with the specified parameters.
-
+
@param config Configuration for noise and randomization
@param len Number of samples per moon (total samples = len * 2)
-    @return A tuple of (coordinates, labels) where:
-            - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis)
-            - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis)
-            - First moon has label 1.0, second moon has label -1.0
-*)
-let generate ?(config = Config.default) ~len () =
-  generate_with_kind float64 ~config ~len ()
+    @return
+      A tuple of (coordinates, labels) where:
+      - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis)
+      - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis)
+      - First moon has label 1.0, second moon has label -1.0 *)
+let generate ?(config = Config.default) ~len () = generate_with_kind float64 ~config ~len ()


(** Generate the half moons dataset with single precision floats.
-
+
@param config Configuration for noise and randomization
@param len Number of samples per moon (total samples = len * 2)
-    @return A tuple of (coordinates, labels) where:
-            - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis) with float32 elements
-            - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis) with float32 elements
-            - First moon has label 1.0, second moon has label -1.0
-*)
+    @return
+      A tuple of (coordinates, labels) where:
+      - coordinates is a bigarray of shape [len*2; 2] (batch_axis, output_axis) with float32
+        elements
+      - labels is a bigarray of shape [len*2; 1] (batch_axis, output_axis) with float32 elements
+      - First moon has label 1.0, second moon has label -1.0 *)
let generate_single_prec ?(config = Config.default) ~len () =
generate_with_kind float32 ~config ~len ()


-(** Generate half moons dataset using the old array-based approach for compatibility.
-    This function is deprecated and provided for backwards compatibility.
-
+(** Generate half moons dataset using the old array-based approach for compatibility. This function
+    is deprecated and provided for backwards compatibility.
+
@param len Number of samples per moon
-    @param noise_range Range of noise to add
-    @return A tuple of (coordinates_array, labels_array) as flat arrays
-*)
+    @param noise_range Range of noise to add
+    @return A tuple of (coordinates_array, labels_array) as flat arrays *)
let generate_arrays ?(noise_range = 0.1) ~len () =
let noise () = Random.float (2.0 *. noise_range) -. noise_range in
-  let coordinates =
-    Array.concat (Array.to_list (Array.init len (fun _ ->
-        let i = Random.int len in
-        let v = Float.of_int i *. Float.pi /. Float.of_int len in
-        let c = Float.cos v and s = Float.sin v in
-        [| c +. noise (); s +. noise (); 1.0 -. c +. noise (); 0.5 -. s +. noise () |])))
+  let coordinates =
+    Array.concat
+      (Array.to_list
+         (Array.init len (fun _ ->
+              let i = Random.int len in
+              let v = Float.of_int i *. Float.pi /. Float.of_int len in
+              let c = Float.cos v and s = Float.sin v in
+              [| c +. noise (); s +. noise (); 1.0 -. c +. noise (); 0.5 -. s +. noise () |])))
in
let labels = Array.init (len * 2) (fun i -> if i mod 2 = 0 then 1. else -1.) in
-  (coordinates, labels)
\ No newline at end of file
+  (coordinates, labels)
File "bin/primitive_ops.ml", line 1, characters 0-0:
diff --git a/_build/default/bin/primitive_ops.ml b/_build/default/bin/.formatted/primitive_ops.ml
index 52f4c78..d04550f 100644
--- a/_build/default/bin/primitive_ops.ml
+++ b/_build/default/bin/.formatted/primitive_ops.ml
@@ -26,9 +26,7 @@ let%debug_sexp graph_t () : unit =
let size = 50 in
let xs = Array.init size ~f:Float.(fun i -> (of_int i / 10.) + 0.1) in
let x_flat =
-    Tensor.term ~grad_spec:Require_grad ~label:[ "x_flat" ]
-      ~fetch_op:(Constant_fill xs)
-      ()
+    Tensor.term ~grad_spec:Require_grad ~label:[ "x_flat" ] ~fetch_op:(Constant_fill xs) ()
in
let step_sym, bindings = IDX.get_static_symbol ~static_range:size IDX.empty in
let%op xkcd = x_flat @| step_sym in
File "bin/moons_benchmark.ml", line 1, characters 0-0:
diff --git a/_build/default/bin/moons_benchmark.ml b/_build/default/bin/.formatted/moons_benchmark.ml
index 31d245e..f0fb1af 100644
--- a/_build/default/bin/moons_benchmark.ml
+++ b/_build/default/bin/.formatted/moons_benchmark.ml
@@ -57,7 +57,9 @@ let classify_moons ~seed ~on_device ~inlining_cutoff ~num_streams ~batch_size ~b
(* let init_lr = 0.1 in *)
let init_lr = 0.01 in
let moons_config = Datasets.Half_moons.Config.{ noise_range = 0.1; seed = Some seed } in
-  let moons_coordinates, moons_labels = Datasets.Half_moons.generate ~config:moons_config ~len:flat_len () in
+  let moons_coordinates, moons_labels =
+    Datasets.Half_moons.generate ~config:moons_config ~len:flat_len ()
+  in
let moons_flat_ndarray = Ir.Ndarray.as_array Ir.Ops.Double moons_coordinates in
let moons_classes_ndarray = Ir.Ndarray.as_array Ir.Ops.Double moons_labels in
let moons_flat ~b:_ = TDSL.rebatch ~l:"moons_flat" moons_flat_ndarray in
@@ -83,14 +85,14 @@ let classify_moons ~seed ~on_device ~inlining_cutoff ~num_streams ~batch_size ~b
@@ Backend.get_global_debug_info ();
let per_batch_callback ~at_batch ~at_step ~learning_rate ~batch_loss ~epoch_loss =
Stdio.printf "Batch=%d, step=%d, lr=%f, batch loss=%f, epoch loss=%f\n%!" at_batch at_step
-       learning_rate batch_loss epoch_loss;
+      learning_rate batch_loss epoch_loss;
if Option.is_none !start_time then start_time := Some (Time_now.nanoseconds_since_unix_epoch ())
in
(* Tn.print_accessible_headers (); *)
let per_epoch_callback ~at_step ~at_epoch ~learning_rate ~epoch_loss =
(* if at_epoch % 10 = 9 then *)
-      Stdio.printf "Epoch=%d, step=%d, lr=%f, epoch loss=%f\n%!" at_epoch at_step learning_rate
-        epoch_loss
+    Stdio.printf "Epoch=%d, step=%d, lr=%f, epoch loss=%f\n%!" at_epoch at_step learning_rate
+      epoch_loss
in


let {
File "bin/zero2hero_1of7.ml", line 1, characters 0-0:
diff --git a/_build/default/bin/zero2hero_1of7.ml b/_build/default/bin/.formatted/zero2hero_1of7.ml
index 4212f12..babcfc2 100644
--- a/_build/default/bin/zero2hero_1of7.ml
+++ b/_build/default/bin/.formatted/zero2hero_1of7.ml
@@ -57,8 +57,7 @@ let _suspended () =
let x_flat =
Tensor.term ~grad_spec:Tensor.Require_grad
~label:[ "x_flat" ] (* ~input_dims:[] ~output_dims:[ 1 ] *)
-      ~fetch_op:(Constant_fill values)
-      ()
+      ~fetch_op:(Constant_fill values) ()
in
let step_sym, bindings = IDX.get_static_symbol ~static_range:size IDX.empty in
(* The [let x =] line is the same as this except [let%op x =] uses [~grad_spec:If_needed]. *)
@@ -110,9 +109,7 @@ let _suspended () =
let xs = Array.init size ~f:Float.(fun i -> (of_int i / 10.) - 5.) in
(* Yay, the whole shape gets inferred! *)
let x_flat =
-    Tensor.term ~grad_spec:Require_grad ~label:[ "x_flat" ]
-      ~fetch_op:(Constant_fill xs)
-      ()
+    Tensor.term ~grad_spec:Require_grad ~label:[ "x_flat" ] ~fetch_op:(Constant_fill xs) ()
in
let step_sym, bindings = IDX.get_static_symbol ~static_range:size IDX.empty in
let%op x = x_flat @| step_sym in
File "lib/ppx_op.ml", line 1, characters 0-0:
diff --git a/_build/default/lib/ppx_op.ml b/_build/default/lib/.formatted/ppx_op.ml
index fe5d727..24aa423 100644
--- a/_build/default/lib/ppx_op.ml
+++ b/_build/default/lib/.formatted/ppx_op.ml
@@ -21,7 +21,7 @@ let make_p ~has_config ~loc =


let make_vb ?value ~has_config ~loc ~str_loc ~ident string =
let pat = Ast_helper.Pat.var ~loc { loc = str_loc; txt = ident } in
-  let value = match value with Some c -> [%expr Some [%e c] ] | None -> [%expr None] in
+  let value = match value with Some c -> [%expr Some [%e c]] | None -> [%expr None] in
let v = [%expr [%e make_p ~has_config ~loc] ?value:[%e value] [%e string]] in
let vb = Ast_helper.Vb.mk ~loc pat v in
(pat, vb)
File "arrayjit/lib/assignments.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/assignments.ml b/_build/default/arrayjit/lib/.formatted/assignments.ml
index b7df201..a1de0e3 100644
--- a/_build/default/arrayjit/lib/assignments.ml
+++ b/_build/default/arrayjit/lib/.formatted/assignments.ml
@@ -171,7 +171,7 @@ let%diagn2_sexp to_low_level code =
assert (Array.length idcs = Array.length (Lazy.force tn.Tn.dims));
match buffer with
| Node tn -> Low_level.Get (tn, idcs)
-    | Merge_buffer tn ->
+    | Merge_buffer tn ->
(* FIXME: NOT IMPLEMENTED YET - need to handle merge buffer access differently now *)
Low_level.Get (tn, idcs)
in
@@ -267,7 +267,6 @@ let%diagn2_sexp to_low_level code =
| Fetch { array; fetch_op = Embed_symbol s; dims } ->
Low_level.loop_over_dims (Lazy.force dims) ~body:(fun idcs ->
set array idcs @@ Embed_index (Iterator s.static_symbol))
-
| Fetch { array; fetch_op = Range_over_offsets; dims = (lazy dims) } ->
Low_level.loop_over_dims dims ~body:(fun idcs ->
let offset = Indexing.reflect_projection ~dims ~projection:idcs in
File "lib/operation.ml", line 1, characters 0-0:
diff --git a/_build/default/lib/operation.ml b/_build/default/lib/.formatted/operation.ml
index bb5851b..eb36e4c 100644
--- a/_build/default/lib/operation.ml
+++ b/_build/default/lib/.formatted/operation.ml
@@ -447,8 +447,8 @@ end
omitted. Note: the data should have no padding and if padding is inferred, the data will be
copied; otherwise, the resulting tensor value shares host memory with the ndarray. *)
let reshape ~l ?b ?(i = []) ?o ndarray =
-  Tensor.term ~label:[ l ] ?batch_dims:b ~input_dims:i ?output_dims:o ~init_data:(Asgns.Reshape ndarray)
-    ()
+  Tensor.term ~label:[ l ] ?batch_dims:b ~input_dims:i ?output_dims:o
+    ~init_data:(Asgns.Reshape ndarray) ()


(** The dimensions are taken from the provided ndarray, but the split into axis kinds still needs to
be inferred (or provided). Assumes no padding. See also: {!reshape} and {!TDSL.wrap_param}. *)
File "arrayjit/lib/tnode.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/tnode.ml b/_build/default/arrayjit/lib/.formatted/tnode.ml
index 0a44d3a..2d628d3 100644
--- a/_build/default/arrayjit/lib/tnode.ml
+++ b/_build/default/arrayjit/lib/.formatted/tnode.ml
@@ -458,7 +458,7 @@ let has a = match a.array with (lazy (Some _)) -> true | _ -> false


let dims_to_string ?(with_axis_numbers = false) arr =
let dims_s =
-    if Lazy.is_val arr.dims then
+    if Lazy.is_val arr.dims then
let padding = Option.map ~f:fst (Lazy.force arr.padding) in
Nd.int_dims_to_string ~with_axis_numbers ?padding @@ Lazy.force arr.dims
else "<not-in-yet>"
@@ -732,7 +732,8 @@ let points_1d ?from_axis ~xdim tn =
let points_2d ?from_axis ~xdim ~ydim tn =
do_read tn;
let padding = Option.map ~f:fst (Lazy.force tn.padding) in
-  Option.value_map ~default:[||] ~f:(fun arr -> Nd.retrieve_2d_points ?from_axis ?padding ~xdim ~ydim arr)
+  Option.value_map ~default:[||] ~f:(fun arr ->
+      Nd.retrieve_2d_points ?from_axis ?padding ~xdim ~ydim arr)
@@ Lazy.force tn.array


let set_value tn =
File "arrayjit/lib/metal_backend.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/metal_backend.ml b/_build/default/arrayjit/lib/.formatted/metal_backend.ml
index 5bad5b7..5eb7e2f 100644
--- a/_build/default/arrayjit/lib/metal_backend.ml
+++ b/_build/default/arrayjit/lib/.formatted/metal_backend.ml
@@ -526,8 +526,8 @@ end) : Ir.Backend_impl.Lowered_backend = struct
^^ string ("0.0" ^ s)))
| ToPowOf, _ -> func "pow"
| Threefry4x32, _ ->
-        (* FIXME: NOT IMPLEMENTED YET *)
-         func "threefry4x32" (* Metal implementation of Threefry4x32 *)
+          (* FIXME: NOT IMPLEMENTED YET *)
+          func "threefry4x32" (* Metal implementation of Threefry4x32 *)
| Arg1, _ | Arg2, _ -> invalid_arg "Metal C_syntax_config: Arg1/Arg2 not operators"


let unop_syntax prec op =
@@ -559,7 +559,11 @@ end) : Ir.Backend_impl.Lowered_backend = struct
| Not, _ -> fun v -> string "!" ^^ v
| Uint4x32_to_prec_uniform target_prec, _ ->
(* FIXME: NOT IMPLEMENTED YET - placeholder for Uint4x32_to_prec_uniform conversion *)
-          fun _v -> string ("/* FIXME: uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform */ (0.0" ^ metal_prec_suffix_float target_prec ^ ")")
+          fun _v ->
+            string
+              ("/* FIXME: uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform */ (0.0"
+              ^ metal_prec_suffix_float target_prec
+              ^ ")")
(* Logical not *)


let convert_precision ~from ~to_ =
File "arrayjit/lib/ndarray.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/ndarray.ml b/_build/default/arrayjit/lib/.formatted/ndarray.ml
index 0b5136d..62d960e 100644
--- a/_build/default/arrayjit/lib/ndarray.ml
+++ b/_build/default/arrayjit/lib/.formatted/ndarray.ml
@@ -204,17 +204,15 @@ let get_voidptr_not_managed nd : unit Ctypes.ptr =
let adjust_idx_for_padding ?padding idx =
match padding with
| None -> idx
-  | Some padding_arr ->
-      Array.mapi idx ~f:(fun i dim_idx ->
-        if i < Array.length padding_arr then
-          dim_idx + padding_arr.(i).left
-        else dim_idx)
+  | Some padding_arr ->
+      Array.mapi idx ~f:(fun i dim_idx ->
+          if i < Array.length padding_arr then dim_idx + padding_arr.(i).left else dim_idx)


(** Helper function to compute end index for iteration, respecting padding margins *)
let compute_end_idx ?padding dims axis =
match padding with
| None -> dims.(axis) - 1
-  | Some padding_arr when axis < Array.length padding_arr ->
+  | Some padding_arr when axis < Array.length padding_arr ->
dims.(axis) - padding_arr.(axis).left - padding_arr.(axis).right - 1
| Some _ -> dims.(axis) - 1


@@ -247,12 +245,11 @@ let fold_bigarray ?padding arr ~init ~f =
let dims = A.dims arr in
let accu = ref init in
let rec cloop idx col =
-    if col = Array.length idx then
+    if col = Array.length idx then
let adjusted_idx = adjust_idx_for_padding ?padding idx in
accu := f !accu idx @@ A.get arr adjusted_idx
else
-      let end_idx = compute_end_idx ?padding dims col
-      in
+      let end_idx = compute_end_idx ?padding dims col in
for j = 0 to end_idx do
idx.(col) <- j;
cloop idx (Int.succ col)
@@ -265,15 +262,22 @@ let fold_bigarray ?padding arr ~init ~f =
let fold_as_float ?padding ~init ~f arr =
match arr with
| Byte_nd arr ->
-      fold_bigarray ?padding ~init ~f:(fun accu idx c -> f accu idx @@ Float.of_int @@ Char.to_int c) arr
-  | Uint16_nd arr -> fold_bigarray ?padding ~init ~f:(fun accu idx v -> f accu idx @@ Float.of_int v) arr
-  | Int32_nd arr -> fold_bigarray ?padding ~init ~f:(fun accu idx v -> f accu idx @@ Int32.to_float v) arr
-  | Uint4x32_nd arr -> fold_bigarray ?padding ~init ~f:(fun accu idx c -> f accu idx c.Stdlib.Complex.re) arr
+      fold_bigarray ?padding ~init
+        ~f:(fun accu idx c -> f accu idx @@ Float.of_int @@ Char.to_int c)
+        arr
+  | Uint16_nd arr ->
+      fold_bigarray ?padding ~init ~f:(fun accu idx v -> f accu idx @@ Float.of_int v) arr
+  | Int32_nd arr ->
+      fold_bigarray ?padding ~init ~f:(fun accu idx v -> f accu idx @@ Int32.to_float v) arr
+  | Uint4x32_nd arr ->
+      fold_bigarray ?padding ~init ~f:(fun accu idx c -> f accu idx c.Stdlib.Complex.re) arr
| Half_nd arr -> fold_bigarray ?padding ~init ~f arr
| Bfloat16_nd arr ->
fold_bigarray ?padding ~init ~f:(fun accu idx v -> f accu idx @@ bfloat16_to_float v) arr
| Fp8_nd arr ->
-      fold_bigarray ?padding ~init ~f:(fun accu idx c -> f accu idx @@ fp8_to_float @@ Char.to_int c) arr
+      fold_bigarray ?padding ~init
+        ~f:(fun accu idx c -> f accu idx @@ fp8_to_float @@ Char.to_int c)
+        arr
| Single_nd arr -> fold_bigarray ?padding ~init ~f arr
| Double_nd arr -> fold_bigarray ?padding ~init ~f arr


@@ -317,8 +321,7 @@ let retrieve_2d_points ?from_axis ?padding ~xdim ~ydim arr =
result := (x, y) :: !result
else if axis = from_axis then iter (axis + 1)
else
-        let end_idx = compute_end_idx ?padding dims axis
-        in
+        let end_idx = compute_end_idx ?padding dims axis in
for p = 0 to end_idx do
idx.(axis) <- p;
iter (axis + 1)
@@ -344,8 +347,7 @@ let retrieve_1d_points ?from_axis ?padding ~xdim arr =
result := x :: !result
else if axis = from_axis then iter (axis + 1)
else
-       let end_idx = compute_end_idx ?padding dims axis
-        in
+        let end_idx = compute_end_idx ?padding dims axis in
for p = 0 to end_idx do
idx.(axis) <- p;
iter (axis + 1)
@@ -366,8 +368,7 @@ let retrieve_flat_values ?padding arr =
let x = get_as_float ?padding arr idx in
result := x :: !result
else
-        let end_idx = compute_end_idx ?padding dims axis
-        in
+        let end_idx = compute_end_idx ?padding dims axis in
for p = 0 to end_idx do
idx.(axis) <- p;
iter (axis + 1)
@@ -386,11 +387,9 @@ let set_flat_values ?padding arr values =
if axis = n_axes then (
if !values_idx < Array.length values then (
set_from_float ?padding arr idx values.(!values_idx);
-          Int.incr values_idx
-        ))
+          Int.incr values_idx))
else
-        let end_idx = compute_end_idx ?padding dims axis
-        in
+        let end_idx = compute_end_idx ?padding dims axis in
for p = 0 to end_idx do
idx.(axis) <- p;
iter (axis + 1)
@@ -466,18 +465,18 @@ let int_dims_to_string ?(with_axis_numbers = false) ?padding dims =
String.concat_array ~sep:" x "
@@ Array.mapi dims ~f:(fun d s -> Int.to_string d ^ ":" ^ Int.to_string s)
else
-    let dim_strings = Array.mapi dims ~f:(fun i dim ->
-      match padding with
-      | None -> Int.to_string dim
-      | Some padding_arr when i < Array.length padding_arr ->
-          let unpadded_dim = dim - padding_arr.(i).left - padding_arr.(i).right in
-          let total_padding = padding_arr.(i).left + padding_arr.(i).right in
-          if total_padding > 0 then
-            Int.to_string unpadded_dim ^ "+" ^ Int.to_string total_padding
-          else
-            Int.to_string dim
-      | Some _ -> Int.to_string dim
-    ) in
+    let dim_strings =
+      Array.mapi dims ~f:(fun i dim ->
+          match padding with
+          | None -> Int.to_string dim
+          | Some padding_arr when i < Array.length padding_arr ->
+              let unpadded_dim = dim - padding_arr.(i).left - padding_arr.(i).right in
+              let total_padding = padding_arr.(i).left + padding_arr.(i).right in
+              if total_padding > 0 then
+                Int.to_string unpadded_dim ^ "+" ^ Int.to_string total_padding
+              else Int.to_string dim
+          | Some _ -> Int.to_string dim)
+    in
String.concat_array ~sep:"x" dim_strings


(** Logs information about the array on the default ppx_minidebug runtime, if
File "arrayjit/lib/c_syntax.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/c_syntax.ml b/_build/default/arrayjit/lib/.formatted/c_syntax.ml
index 776da2e..722f427 100644
--- a/_build/default/arrayjit/lib/c_syntax.ml
+++ b/_build/default/arrayjit/lib/.formatted/c_syntax.ml
@@ -646,9 +646,9 @@ module C_syntax (B : C_syntax_config) = struct
(defs, expr)
| Unop (Ops.Uint4x32_to_prec_uniform target_prec, v) ->
let defs, expr_v = pp_float prec v in
-        let expr =
-          string ("uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform(") ^^
-          expr_v ^^ string ")"
+        let expr =
+          string ("uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform(")
+          ^^ expr_v ^^ string ")"
in
(defs, expr)
| Unop (op, v) ->
@@ -720,9 +720,9 @@ module C_syntax (B : C_syntax_config) = struct
(B.binop_syntax prec op v1_doc v2_doc, idcs1 @ idcs2)
| Unop (Ops.Uint4x32_to_prec_uniform target_prec, v) ->
let v_doc, idcs = debug_float prec v in
-        let expr_doc =
-          string ("uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform(") ^^
-          v_doc ^^ string "){=" ^^ string B.float_log_style ^^ string "}"
+        let expr_doc =
+          string ("uint4x32_to_" ^ Ops.prec_string target_prec ^ "_uniform(")
+          ^^ v_doc ^^ string "){=" ^^ string B.float_log_style ^^ string "}"
in
(expr_doc, idcs)
| Unop (op, v) ->
File "arrayjit/lib/low_level.ml", line 1, characters 0-0:
diff --git a/_build/default/arrayjit/lib/low_level.ml b/_build/default/arrayjit/lib/.formatted/low_level.ml
index e36eeb6..18d0be0 100644
--- a/_build/default/arrayjit/lib/low_level.ml
+++ b/_build/default/arrayjit/lib/.formatted/low_level.ml
@@ -158,7 +158,6 @@ let is_constexpr_comp traced_store llv =
| Get_local { tn; _ } | Local_scope { id = { tn; _ }; _ } ->
let traced = get_node traced_store tn in
traced.is_scalar_constexpr
-
| Get (tn, _) ->
let traced = get_node traced_store tn in
traced.is_scalar_constexpr
dune build @fmt failed
"/usr/bin/env" "bash" "-c" "opam exec -- dune build @fmt --ignore-promoted-rules || (echo "dune build @fmt failed"; exit 2)" failed with exit status 2
2025-07-14 15:57.16: Job failed: Failed: Build failed