From 873f04c827561360aab6797fc2dcd14d183a00b8 Mon Sep 17 00:00:00 2001
From: Lionel GUEZ <guez@lmd.ens.fr>
Date: Tue, 24 Sep 2024 19:03:37 +0200
Subject: [PATCH] Process only one orientation per run
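
Read the orientation ("Anticyclones" or "Cyclones") from standard
input instead of looping over both orientations in one run. Update
the Long_trajectories test accordingly: it now feeds "Anticyclones"
on standard input.

A non-interactive run might look like this (a sketch, assuming the
script is executable and on the PATH):

    echo Anticyclones | long_trajectories.py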

---
 Trajectories/Analysis/long_trajectories.py | 178 ++++++++++-----------
 Trajectories/Tests/tests.json              |   3 +-
 2 files changed, 89 insertions(+), 92 deletions(-)

diff --git a/Trajectories/Analysis/long_trajectories.py b/Trajectories/Analysis/long_trajectories.py
index 899ac88c..ce69572b 100755
--- a/Trajectories/Analysis/long_trajectories.py
+++ b/Trajectories/Analysis/long_trajectories.py
@@ -20,94 +20,90 @@ SHPC = util_eddies.SHPC_class(
 )
 dir_part = {"Anticyclones": "Graph_anti", "Cyclones": "Graph_cyclo"}
 min_duration = 16 * 7
-
-for orientation in ["Anticyclones", "Cyclones"]:
-    print("Orientation:", orientation)
-
-    with open(
-        "/data/guez/Oceanic_eddies/Eurec4A/"
-        f"{dir_part[orientation]}/expanded_traj.json"
-    ) as f_obj:
-        expanded_traj = json.load(f_obj)
-
-    traj_list = expanded_traj["traj"].values()
-    duration_array = plot_traj.get_duration(
-        traj_list, expanded_traj["e_overestim"]
-    )
-
-    # Create a mask for instantaneous eddies which are in trajectories
-    # longer than min_duration. With this mask, we can read through
-    # the shapefiles sequentially. This is faster than processing
-    # occurrences in the order of trajectories and jumping arouund the
-    # shapefiles to fetch corresponding information. The mask,
-    # `long_traj`, is a list of numpy arrays. Each element of the list
-    # corresponds to a slice of the SHPC. `long_traj` takes a lot of
-    # main memory, though: about 30 MiB.
-
-    t0 = time.perf_counter()
-    long_traj = []
-
-    # Create the arrays in `long_traj` with the right size and
-    # initialize them to False. False means the instantaneous eddy is
-    # not in a long trajectory.
-
-    for i_slice in range(SHPC.n_slices):
-        n_shapes = SHPC.get_n_shapes(i_slice, orientation)
-        x = np.full(n_shapes, False)
-        long_traj.append(x)
-
-    # Define `long_traj`. Note that we do not read the shapefiles
-    # here, so this should be fast.
-    for traj, duration in zip(expanded_traj["traj"].values(), duration_array):
-        if duration >= min_duration:
-            for n in traj:
-                i_slice, ishape = SHPC.comp_ishape_n(
-                    n, expanded_traj["e_overestim"], orientation
-                )
-                long_traj[i_slice][ishape] = True
-
-    t1 = time.perf_counter()
-    print(f"Time to define long_traj: {t1 - t0:.0f} s")
-    t0 = t1
-    my_data = []
-
-    # Now read the extremum shapefiles sequentially:
-    for i_slice in range(SHPC.n_slices):
-        reader = SHPC.get_reader(i_slice, orientation, "extremum")
-
-        for rec in reader.iterRecords(fields=["speed"]):
-            if long_traj[i_slice][rec.oid]:
-                my_data.append(rec["speed"])
-
-    my_data = np.array(my_data, np.float32)
-    fname = f"speed_{orientation.lower()}"
-    np.save(fname, my_data)
-    print("Created", fname)
-    t1 = time.perf_counter()
-    print(f"Time to create {fname}: {t1 - t0:.0f} s")
-    t0 = t1
-    my_data = []
-
-    # Read other shapefiles, max_speed_contour and outermost_contour,
-    # sequentially:
-    for i_slice in range(SHPC.n_slices):
-        reader_max = SHPC.get_reader(i_slice, orientation, "max_speed_contour")
-        reader_out = SHPC.get_reader(i_slice, orientation, "outermost_contour")
-
-        for rec_max, rec_out in zip(
-            reader_max.iterRecords(fields=["r_eq_area"]),
-            reader_out.iterRecords(fields=["r_eq_area"]),
-        ):
-            if long_traj[i_slice][rec_max.oid]:
-                if rec_max.r_eq_area == -100:
-                    my_data.append(rec_out.r_eq_area)
-                else:
-                    my_data.append(rec_max.r_eq_area)
-
-    my_data = np.array(my_data, np.float32)
-    fname = f"radius_{orientation.lower()}"
-    np.save(fname, my_data)
-    print("Created", fname)
-    t1 = time.perf_counter()
-    print(f"Time to create {fname}: {t1 - t0:.0f} s")
-    t0 = t1
+orientation = input("Orientation (Anticyclones or Cyclones)? ")
+
+with open(
+    "/data/guez/Oceanic_eddies/Eurec4A/"
+    f"{dir_part[orientation]}/expanded_traj.json"
+) as f_obj:
+    expanded_traj = json.load(f_obj)
+
+traj_list = expanded_traj["traj"].values()
+duration_array = plot_traj.get_duration(traj_list, expanded_traj["e_overestim"])
+
+# Create a mask for instantaneous eddies that belong to trajectories
+# lasting at least min_duration. With this mask, we can read through
+# the shapefiles sequentially. This is faster than processing
+# occurrences in the order of trajectories and jumping around the
+# shapefiles to fetch corresponding information. The mask,
+# `long_traj`, is a list of numpy arrays. Each element of the list
+# corresponds to a slice of the SHPC. `long_traj` takes a lot of
+# main memory, though: about 30 MiB.
+
+t0 = time.perf_counter()
+long_traj = []
+
+# Create the arrays in `long_traj` with the right size and
+# initialize them to False. False means the instantaneous eddy is
+# not in a long trajectory.
+
+for i_slice in range(SHPC.n_slices):
+    n_shapes = SHPC.get_n_shapes(i_slice, orientation)
+    x = np.full(n_shapes, False)
+    long_traj.append(x)
+
+# Define `long_traj`. Note that we do not read the shapefiles
+# here, so this should be fast.
+for traj, duration in zip(traj_list, duration_array):
+    if duration >= min_duration:
+        for n in traj:
+            i_slice, ishape = SHPC.comp_ishape_n(
+                n, expanded_traj["e_overestim"], orientation
+            )
+            long_traj[i_slice][ishape] = True
+
+t1 = time.perf_counter()
+print(f"Time to define long_traj: {t1 - t0:.0f} s")
+t0 = t1
+my_data = []
+
+# Now read the extremum shapefiles sequentially:
+for i_slice in range(SHPC.n_slices):
+    reader = SHPC.get_reader(i_slice, orientation, "extremum")
+
+    for rec in reader.iterRecords(fields=["speed"]):
+        if long_traj[i_slice][rec.oid]:
+            my_data.append(rec["speed"])
+
+my_data = np.array(my_data, np.float32)
+fname = f"speed_{orientation.lower()}"
+np.save(fname, my_data)
+print("Created", fname)
+t1 = time.perf_counter()
+print(f"Time to create {fname}: {t1 - t0:.0f} s")
+t0 = t1
+my_data = []
+
+# Read other shapefiles, max_speed_contour and outermost_contour,
+# sequentially:
+for i_slice in range(SHPC.n_slices):
+    reader_max = SHPC.get_reader(i_slice, orientation, "max_speed_contour")
+    reader_out = SHPC.get_reader(i_slice, orientation, "outermost_contour")
+
+    for rec_max, rec_out in zip(
+        reader_max.iterRecords(fields=["r_eq_area"]),
+        reader_out.iterRecords(fields=["r_eq_area"]),
+    ):
+        if long_traj[i_slice][rec_max.oid]:
+            if rec_max.r_eq_area == -100:  # fill value for a missing contour
+                my_data.append(rec_out.r_eq_area)
+            else:
+                my_data.append(rec_max.r_eq_area)
+
+my_data = np.array(my_data, np.float32)
+fname = f"radius_{orientation.lower()}"
+np.save(fname, my_data)
+print("Created", fname)
+t1 = time.perf_counter()
+print(f"Time to create {fname}: {t1 - t0:.0f} s")
+t0 = t1
diff --git a/Trajectories/Tests/tests.json b/Trajectories/Tests/tests.json
index c8b33f64..5d3ed310 100644
--- a/Trajectories/Tests/tests.json
+++ b/Trajectories/Tests/tests.json
@@ -314,6 +314,7 @@
     ]
   },
   "Long_trajectories": {
-    "command": "$src_dir/Trajectories/Analysis/long_trajectories.py"
+    "command": "$src_dir/Trajectories/Analysis/long_trajectories.py",
+    "input": "Anticyclones\n"
   }
 }
-- 
GitLab