From d5a7c519e0c504091170f244c17117fe05b80400 Mon Sep 17 00:00:00 2001 From: epernod Date: Mon, 19 Jan 2026 17:06:33 +0100 Subject: [PATCH 01/11] Add export of CSV format --- tools/RegressionSceneData.py | 64 ++++++++++++++++++++++++++++++++++-- tools/RegressionSceneList.py | 2 +- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 5992519..d657954 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -4,6 +4,7 @@ import numpy as np import gzip import pathlib +import csv import Sofa @@ -169,13 +170,72 @@ def load_scene(self): self.parse_node(self.root_node, 0) counter = 0 for mecaObj in self.meca_objs: - _filename = self.file_ref_path + ".reference_mstate_" + str(counter) + "_" + mecaObj.name.value + ".json.gz" + _filename = self.file_ref_path + ".reference_mstate_" + str(counter) + "_" + mecaObj.name.value + ".csv.gz" self.filenames.append(_filename) counter = counter+1 + def write_CSV_references(self): + pbar_simu = tqdm(total=self.steps, disable=self.disable_progress_bar) + pbar_simu.set_description("Simulate: " + self.file_scene_path) + + # Prepare per-mechanical-object CSV rows + nbr_meca = len(self.meca_objs) + csv_rows = [[] for _ in range(nbr_meca)] + + counter_step = 0 + modulo_step = self.steps / self.dump_number_step + dt = self.root_node.dt.value + + for step in range(0, self.steps + 1): + if step == 0 or counter_step >= modulo_step or step == self.steps: + t = dt * step + + for meca_id in range(nbr_meca): + positions = np.asarray(self.meca_objs[meca_id].position.value) + # positions shape: (N, 3) + + row = [t] + row.extend(positions.reshape(-1).tolist()) # flatten vec3d + + csv_rows[meca_id].append(row) + + counter_step = 0 + + Sofa.Simulation.animate(self.root_node, dt) + counter_step += 1 + pbar_simu.update(1) + + pbar_simu.close() + + # Write CSV files (gzipped, like your JSON) + for meca_id in range(nbr_meca): + output_file = 
pathlib.Path(self.filenames[meca_id]) + output_file.parent.mkdir(exist_ok=True, parents=True) + + with gzip.open(self.filenames[meca_id], "wt", newline="") as f: + writer = csv.writer(f) + + dof_per_point = self.meca_objs[meca_id].position.value.shape[1] + n_points = self.meca_objs[meca_id].position.value.shape[0] + + f.write(f"# dof_per_point={dof_per_point}\n") + f.write(f"# num_points={n_points}\n") + + if dof_per_point == 2: + f.write("# layout=time,X0,Y1,...,Xn,Yn\n") + elif dof_per_point == 3: + f.write("# layout=time,X0,Y1,Z1,...,Xn,Yn,Zn\n") + elif dof_per_point == 7: + f.write("# layout=time,X0,Y1,Z1,Qx1,Qy1,Qz1,Qw1,...,Xn,Yn,Zn,QxN,QyN,QzN1,QwN\n") + else: + f.write("# layout=unknown\n") + + writer.writerows(csv_rows[meca_id]) + + Sofa.Simulation.unload(self.root_node) - def write_references(self): + def write_JSON_references(self): pbar_simu = tqdm(total=self.steps, disable=self.disable_progress_bar) pbar_simu.set_description("Simulate: " + self.file_scene_path) diff --git a/tools/RegressionSceneList.py b/tools/RegressionSceneList.py index 4dff0a7..fe72d96 100644 --- a/tools/RegressionSceneList.py +++ b/tools/RegressionSceneList.py @@ -99,7 +99,7 @@ def write_references(self, id_scene, print_log = False): if print_log is True: self.scenes_data_sets[id_scene].print_meca_objs() - self.scenes_data_sets[id_scene].write_references() + self.scenes_data_sets[id_scene].write_CSV_references() def write_all_references(self): nbr_scenes = len(self.scenes_data_sets) From 2933466a2b13dc5ec7216634c2d4062507a284c3 Mon Sep 17 00:00:00 2001 From: epernod Date: Mon, 19 Jan 2026 20:28:04 +0100 Subject: [PATCH 02/11] Add import in CSV --- tools/RegressionSceneData.py | 163 ++++++++++++++++++++++++++++++++++- tools/RegressionSceneList.py | 2 +- 2 files changed, 162 insertions(+), 3 deletions(-) diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index d657954..167880e 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -271,9 
+271,168 @@ def write_JSON_references(self): write_file.write(json.dumps(numpy_data[meca_id], cls=NumpyArrayEncoder).encode('utf-8')) Sofa.Simulation.unload(self.root_node) - - def compare_references(self): + + def compare_csv_references(self): + pbar_simu = tqdm(total=float(self.steps), disable=self.disable_progress_bar) + pbar_simu.set_description("compare_references: " + self.file_scene_path) + + nbr_meca = len(self.meca_objs) + + # Reference data + ref_times = [] # shared timeline + ref_values = [] # List[List[np.ndarray]] + + self.total_error = [] + self.error_by_dof = [] + self.nbr_tested_frame = 0 + self.regression_failed = False + + # -------------------------------------------------- + # Helper: read CSV + metadata + # -------------------------------------------------- + def _read_csv_with_metadata(filename): + meta = {} + data_rows = [] + + with gzip.open(filename, "rt") as f: + # Read metadata + while True: + pos = f.tell() + line = f.readline() + if not line: + break + + if line.startswith("#"): + if "=" in line: + k, v = line[1:].strip().split("=", 1) + meta[k.strip()] = v.strip() + else: + f.seek(pos) + break + + reader = csv.reader(f) + for row in reader: + if row: + data_rows.append(row) + + return meta, data_rows + + # -------------------------------------------------- + # Load reference files + # -------------------------------------------------- + try: + for meca_id in range(nbr_meca): + meta, rows = _read_csv_with_metadata(self.filenames[meca_id]) + + dof_per_point = int(meta["dof_per_point"]) + n_points = int(meta["num_points"]) + + times = [] + values = [] + + for row in rows: + t = float(row[0]) + flat = np.asarray(row[1:], dtype=float) + + expected_size = n_points * dof_per_point + if flat.size != expected_size: + print( + f"Reference size mismatch for file {self.file_scene_path}, " + f"MechanicalObject {meca_id}: " + f"expected {expected_size}, got {flat.size}" + ) + return False + + values.append(flat.reshape((n_points, dof_per_point))) + 
times.append(t) + + # Keep timeline from first MechanicalObject + if meca_id == 0: + ref_times = times + else: + if len(times) != len(ref_times): + print( + f"Reference timeline mismatch for file {self.file_scene_path}, " + f"MechanicalObject {meca_id}" + ) + return False + + ref_values.append(values) + self.total_error.append(0.0) + self.error_by_dof.append(0.0) + + except FileNotFoundError as e: + print(f"Error while reading references: {str(e)}") + return False + except KeyError as e: + print(f"Missing metadata in reference file: {str(e)}") + return False + + # -------------------------------------------------- + # Simulation + comparison + # -------------------------------------------------- + frame_step = 0 + nbr_frames = len(ref_times) + dt = self.root_node.dt.value + + for step in range(0, self.steps + 1): + simu_time = dt * step + + # Use tolerance for float comparison + if frame_step < nbr_frames and np.isclose(simu_time, ref_times[frame_step]): + for meca_id in range(nbr_meca): + meca_dofs = np.copy(self.meca_objs[meca_id].position.value) + data_ref = ref_values[meca_id][frame_step] + + if meca_dofs.shape != data_ref.shape: + print( + f"Shape mismatch for file {self.file_scene_path}, " + f"MechanicalObject {meca_id}: " + f"reference {data_ref.shape} vs current {meca_dofs.shape}" + ) + return False + + data_diff = data_ref - meca_dofs + + # Compute errors + full_dist = np.linalg.norm(data_diff) + error_by_dof = full_dist / float(data_diff.size) + + if self.verbose: + print( + f"{step} | {self.meca_objs[meca_id].name.value} | " + f"full_dist: {full_dist} | " + f"error_by_dof: {error_by_dof} | " + f"nbrDofs: {data_ref.size}" + ) + + self.total_error[meca_id] += full_dist + self.error_by_dof[meca_id] += error_by_dof + + frame_step += 1 + self.nbr_tested_frame += 1 + + # Safety exit + if frame_step == nbr_frames: + break + + Sofa.Simulation.animate(self.root_node, dt) + pbar_simu.update(1) + + pbar_simu.close() + + # 
-------------------------------------------------- + # Final regression verdict + # -------------------------------------------------- + for meca_id in range(nbr_meca): + if self.total_error[meca_id] > self.epsilon: + self.regression_failed = True + return False + + return True + + def compare_json_references(self): pbar_simu = tqdm(total=float(self.steps), disable=self.disable_progress_bar) pbar_simu.set_description("compare_references: " + self.file_scene_path) diff --git a/tools/RegressionSceneList.py b/tools/RegressionSceneList.py index fe72d96..fa5a6fe 100644 --- a/tools/RegressionSceneList.py +++ b/tools/RegressionSceneList.py @@ -124,7 +124,7 @@ def compare_references(self, id_scene): self.nbr_errors = self.nbr_errors + 1 print(f'Error while trying to load: {str(e)}') else: - result = self.scenes_data_sets[id_scene].compare_references() + result = self.scenes_data_sets[id_scene].compare_csv_references() if not result: self.nbr_errors = self.nbr_errors + 1 From d82ab74125ec15dc2de0beb8a789eaab37fe97b2 Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 14:32:27 +0100 Subject: [PATCH 03/11] =?UTF-8?q?=EF=BB=BFMove=20json=20and=20csv=20read/w?= =?UTF-8?q?rite=20method=20into=20a=20dedicated=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tools/ReferenceFileIO.py | 79 +++++++++++++++++++++++++++ tools/RegressionSceneData.py | 101 +++++++---------------------- 2 files changed, 98 insertions(+), 82 deletions(-) create mode 100644 tools/ReferenceFileIO.py diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py new file mode 100644 index 0000000..916ac11 --- /dev/null +++ b/tools/ReferenceFileIO.py @@ -0,0 +1,80 @@ +import gzip +import csv +import numpy as np +import json +from json import JSONEncoder + + +class NumpyArrayEncoder(JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + return JSONEncoder.default(self, obj) + + +# 
-------------------------------------------------- +# Helper: read CSV + metadata +# -------------------------------------------------- +def read_CSV_reference_file(file_path): + meta = {} + data_rows = [] + + with gzip.open(file_path, "rt") as f: + # Read metadata + while True: + pos = f.tell() + line = f.readline() + if not line: + break + + if line.startswith("#"): + if "=" in line: + k, v = line[1:].strip().split("=", 1) + meta[k.strip()] = v.strip() + else: + f.seek(pos) + break + + reader = csv.reader(f) + for row in reader: + if row: + data_rows.append(row) + + return meta, data_rows + + +def write_CSV_reference_file(file_path, meca_obj): + with gzip.open(file_path, "wt", newline="") as f: + writer = csv.writer(f) + + dof_per_point = meca_obj.position.value.shape[1] + n_points = meca_obj.position.value.shape[0] + + f.write(f"# dof_per_point={dof_per_point}\n") + f.write(f"# num_points={n_points}\n") + + if dof_per_point == 2: + f.write("# layout=time,X0,Y1,...,Xn,Yn\n") + elif dof_per_point == 3: + f.write("# layout=time,X0,Y1,Z1,...,Xn,Yn,Zn\n") + elif dof_per_point == 7: + f.write("# layout=time,X0,Y1,Z1,Qx1,Qy1,Qz1,Qw1,...,Xn,Yn,Zn,QxN,QyN,QzN1,QwN\n") + else: + f.write("# layout=unknown\n") + + writer.writerows(csv_rows[meca_id]) + + +def write_JSON_reference_file(file_path, numpy_data): + with gzip.open(file_path, 'wb') as write_file: + write_file.write(json.dumps(numpy_data, cls=NumpyArrayEncoder).encode('utf-8')) + +def read_JSON_reference_file(file_path): + with gzip.open(file_path, 'r') as zipfile: + decoded_array = json.loads(zipfile.read().decode('utf-8')) + + keyframes = [] + for key in decoded_array: + keyframes.append(float(key)) + + return decoded_array, keyframes \ No newline at end of file diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 167880e..99ec5bd 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -1,11 +1,8 @@ from tqdm import tqdm -import json -from json import
JSONEncoder import numpy as np -import gzip import pathlib -import csv +import tools.ReferenceFileIO as reference_io import Sofa def is_simulated(node): @@ -50,12 +47,6 @@ def onAnimateEndEvent(self, event): self.frame_step += 1 -class NumpyArrayEncoder(JSONEncoder): - def default(self, obj): - if isinstance(obj, np.ndarray): - return obj.tolist() - return JSONEncoder.default(self, obj) - def is_mapped(node): mapping = node.getMechanicalMapping() @@ -193,7 +184,6 @@ def write_CSV_references(self): for meca_id in range(nbr_meca): positions = np.asarray(self.meca_objs[meca_id].position.value) - # positions shape: (N, 3) row = [t] row.extend(positions.reshape(-1).tolist()) # flatten vec3d @@ -213,25 +203,7 @@ def write_CSV_references(self): output_file = pathlib.Path(self.filenames[meca_id]) output_file.parent.mkdir(exist_ok=True, parents=True) - with gzip.open(self.filenames[meca_id], "wt", newline="") as f: - writer = csv.writer(f) - - dof_per_point = self.meca_objs[meca_id].position.value.shape[1] - n_points = self.meca_objs[meca_id].position.value.shape[0] - - f.write(f"# dof_per_point={dof_per_point}\n") - f.write(f"# num_points={n_points}\n") - - if dof_per_point == 2: - f.write("# layout=time,X0,Y1,...,Xn,Yn\n") - elif dof_per_point == 3: - f.write("# layout=time,X0,Y1,Z1,...,Xn,Yn,Zn\n") - elif dof_per_point == 7: - f.write("# layout=time,X0,Y1,Z1,Qx1,Qy1,Qz1,Qw1,...,Xn,Yn,Zn,QxN,QyN,QzN1,QwN\n") - else: - f.write("# layout=unknown\n") - - writer.writerows(csv_rows[meca_id]) + reference_io.write_CSV_reference_file(self.filenames[meca_id], self.meca_objs[meca_id]) Sofa.Simulation.unload(self.root_node) @@ -266,9 +238,8 @@ def write_JSON_references(self): # make sure the parent directory of the references exists output_file = pathlib.Path(self.filenames[meca_id]) output_file.parent.mkdir(exist_ok=True, parents=True) - - with gzip.open(self.filenames[meca_id], 'wb') as write_file: - write_file.write(json.dumps(numpy_data[meca_id], 
cls=NumpyArrayEncoder).encode('utf-8')) + + reference_io.write_JSON_reference_file(self.filenames[meca_id], numpy_data[meca_id]) Sofa.Simulation.unload(self.root_node) @@ -288,42 +259,12 @@ def compare_csv_references(self): self.nbr_tested_frame = 0 self.regression_failed = False - # -------------------------------------------------- - # Helper: read CSV + metadata - # -------------------------------------------------- - def _read_csv_with_metadata(filename): - meta = {} - data_rows = [] - - with gzip.open(filename, "rt") as f: - # Read metadata - while True: - pos = f.tell() - line = f.readline() - if not line: - break - - if line.startswith("#"): - if "=" in line: - k, v = line[1:].strip().split("=", 1) - meta[k.strip()] = v.strip() - else: - f.seek(pos) - break - - reader = csv.reader(f) - for row in reader: - if row: - data_rows.append(row) - - return meta, data_rows - # -------------------------------------------------- # Load reference files # -------------------------------------------------- try: for meca_id in range(nbr_meca): - meta, rows = _read_csv_with_metadata(self.filenames[meca_id]) + meta, rows = reference_io.read_CSV_reference_file(self.filenames[meca_id]) dof_per_point = int(meta["dof_per_point"]) n_points = int(meta["num_points"]) @@ -395,7 +336,7 @@ def _read_csv_with_metadata(filename): data_diff = data_ref - meca_dofs - # Compute errors + # Compute total distance between the 2 sets full_dist = np.linalg.norm(data_diff) error_by_dof = full_dist / float(data_diff.size) @@ -413,18 +354,16 @@ def _read_csv_with_metadata(filename): frame_step += 1 self.nbr_tested_frame += 1 - # Safety exit + # security exit if simulation steps exceed nbr_frames if frame_step == nbr_frames: break Sofa.Simulation.animate(self.root_node, dt) - pbar_simu.update(1) + pbar_simu.update(1) pbar_simu.close() - # -------------------------------------------------- - # Final regression verdict - # -------------------------------------------------- + # Final regression returns 
value for meca_id in range(nbr_meca): if self.total_error[meca_id] > self.epsilon: self.regression_failed = True @@ -442,21 +381,19 @@ def compare_json_references(self): self.total_error = [] self.error_by_dof = [] - try: - for meca_id in range(0, nbr_meca): - with gzip.open(self.filenames[meca_id], 'r') as zipfile: - decoded_array = json.loads(zipfile.read().decode('utf-8')) - numpy_data.append(decoded_array) - - if meca_id == 0: - for key in decoded_array: - keyframes.append(float(key)) + for meca_id in range(0, nbr_meca): + try: + decoded_array, decoded_keyframes = reference_io.read_JSON_reference_file(self.filenames[meca_id]) + numpy_data.append(decoded_array) + + if meca_id == 0: + keyframes = decoded_keyframes self.total_error.append(0.0) self.error_by_dof.append(0.0) - except FileNotFoundError as e: - print(f'Error while reading references: {str(e)}') - return False + except FileNotFoundError as e: + print(f'Error while reading references: {str(e)}') + return False frame_step = 0 From dc65c1ef58f617764c1745c2623153607a5abd0a Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 14:50:39 +0100 Subject: [PATCH 04/11] [scripts] factorize write_references with the type JSON or CSV as parameter --- tools/ReferenceFileIO.py | 10 ++--- tools/RegressionSceneData.py | 82 ++++++++++++++---------------------- tools/RegressionSceneList.py | 2 +- 3 files changed, 35 insertions(+), 59 deletions(-) diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py index 916ac11..f229193 100644 --- a/tools/ReferenceFileIO.py +++ b/tools/ReferenceFileIO.py @@ -42,15 +42,11 @@ def read_CSV_reference_file(file_path): return meta, data_rows -def write_CSV_reference_file(file_path, meca_obj): +def write_CSV_reference_file(file_path, dof_per_point, num_points, csv_rows): with gzip.open(file_path, "wt", newline="") as f: writer = csv.writer(f) - - dof_per_point = meca_obj.position.value.shape[1] - n_points = meca_obj.position.value.shape[0] - f.write(f"# 
dof_per_point={dof_per_point}\n") - f.write(f"# num_points={n_points}\n") + f.write(f"# num_points={num_points}\n") if dof_per_point == 2: f.write("# layout=time,X0,Y1,...,Xn,Yn\n") @@ -61,7 +57,7 @@ def write_CSV_reference_file(file_path, meca_obj): else: f.write("# layout=unknown\n") - writer.writerows(csv_rows[meca_id]) + writer.writerows(csv_rows) def write_JSON_reference_file(file_path, numpy_data): diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 99ec5bd..2fe59f6 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -166,82 +166,62 @@ def load_scene(self): counter = counter+1 - def write_CSV_references(self): + def write_references(self, format = "JSON"): pbar_simu = tqdm(total=self.steps, disable=self.disable_progress_bar) pbar_simu.set_description("Simulate: " + self.file_scene_path) - # Prepare per-mechanical-object CSV rows - nbr_meca = len(self.meca_objs) - csv_rows = [[] for _ in range(nbr_meca)] - + # compute stepping parameters for the simulation counter_step = 0 modulo_step = self.steps / self.dump_number_step dt = self.root_node.dt.value + + # prepae per-mechanical-object data + nbr_meca = len(self.meca_objs) + if format == "CSV": + csv_rows = [[] for _ in range(nbr_meca)] + elif format == "JSON": + numpy_data = [] # List + for meca_id in range(0, nbr_meca): + meca_dofs = {} + numpy_data.append(meca_dofs) + else: + print(f"Unsupported format: {format}") + raise ValueError(f"Unsupported format: {format}") for step in range(0, self.steps + 1): if step == 0 or counter_step >= modulo_step or step == self.steps: t = dt * step - for meca_id in range(nbr_meca): positions = np.asarray(self.meca_objs[meca_id].position.value) - row = [t] - row.extend(positions.reshape(-1).tolist()) # flatten vec3d - - csv_rows[meca_id].append(row) - + if format == "CSV": + row = [t] + row.extend(positions.reshape(-1).tolist()) # flatten vec3d + csv_rows[meca_id].append(row) + elif format == "JSON": + 
numpy_data[meca_id][t] = np.copy(positions) + counter_step = 0 - + Sofa.Simulation.animate(self.root_node, dt) counter_step += 1 pbar_simu.update(1) pbar_simu.close() - # Write CSV files (gzipped, like your JSON) + # write reference files for meca_id in range(nbr_meca): output_file = pathlib.Path(self.filenames[meca_id]) output_file.parent.mkdir(exist_ok=True, parents=True) - reference_io.write_CSV_reference_file(self.filenames[meca_id], self.meca_objs[meca_id]) - - Sofa.Simulation.unload(self.root_node) - - def write_JSON_references(self): - pbar_simu = tqdm(total=self.steps, disable=self.disable_progress_bar) - pbar_simu.set_description("Simulate: " + self.file_scene_path) - - nbr_meca = len(self.meca_objs) - numpy_data = [] # List - for meca_id in range(0, nbr_meca): - meca_dofs = {} - numpy_data.append(meca_dofs) - - - counter_step = 0 - modulo_step = self.steps / self.dump_number_step - - for step in range(0, self.steps + 1): - # export rest position, final position + modulo steps: - if step == 0 or counter_step >= modulo_step or step == self.steps: - for meca_id in range(0, nbr_meca): - numpy_data[meca_id][self.root_node.dt.value * step] = np.copy(self.meca_objs[meca_id].position.value) - counter_step = 0 - - Sofa.Simulation.animate(self.root_node, self.root_node.dt.value) - counter_step = counter_step + 1 - - pbar_simu.update(1) - pbar_simu.close() - - for meca_id in range(0, nbr_meca): - # make sure the parent directory of the references exists - output_file = pathlib.Path(self.filenames[meca_id]) - output_file.parent.mkdir(exist_ok=True, parents=True) - - reference_io.write_JSON_reference_file(self.filenames[meca_id], numpy_data[meca_id]) + if format == "CSV": + dof_per_point = self.meca_objs[meca_id].position.value.shape[1] + n_points = self.meca_objs[meca_id].position.value.shape[0] + reference_io.write_CSV_reference_file(self.filenames[meca_id], dof_per_point, n_points, csv_rows[meca_id]) + elif format == "JSON": + 
reference_io.write_JSON_reference_file(self.filenames[meca_id], numpy_data[meca_id]) - Sofa.Simulation.unload(self.root_node) + Sofa.Simulation.unload(self.root_node) def compare_csv_references(self): diff --git a/tools/RegressionSceneList.py b/tools/RegressionSceneList.py index fa5a6fe..36fa5b2 100644 --- a/tools/RegressionSceneList.py +++ b/tools/RegressionSceneList.py @@ -99,7 +99,7 @@ def write_references(self, id_scene, print_log = False): if print_log is True: self.scenes_data_sets[id_scene].print_meca_objs() - self.scenes_data_sets[id_scene].write_CSV_references() + self.scenes_data_sets[id_scene].write_references() def write_all_references(self): nbr_scenes = len(self.scenes_data_sets) From 52716c44622a7f6762ad77174b956e8084993091 Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 15:08:57 +0100 Subject: [PATCH 05/11] [scripts] factorize compare_references with the type JSON or CSV as parameter --- tools/RegressionSceneData.py | 187 ++++++++++++++--------------------- tools/RegressionSceneList.py | 2 +- 2 files changed, 73 insertions(+), 116 deletions(-) diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 2fe59f6..7ceb994 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -224,62 +224,82 @@ def write_references(self, format = "JSON"): Sofa.Simulation.unload(self.root_node) - def compare_csv_references(self): + def compare_references(self, format = "JSON"): + if format not in ("CSV", "JSON"): + print(f"Unsupported format: {format}") + raise ValueError(f"Unsupported format: {format}") + pbar_simu = tqdm(total=float(self.steps), disable=self.disable_progress_bar) pbar_simu.set_description("compare_references: " + self.file_scene_path) nbr_meca = len(self.meca_objs) - + # Reference data - ref_times = [] # shared timeline - ref_values = [] # List[List[np.ndarray]] + keyframes = [] # shared timeline + 
if format == "CSV": + ref_values = [] # List[List[np.ndarray]] + elif format == "JSON": + numpy_data = [] # List + + # Outputs init self.total_error = [] self.error_by_dof = [] self.nbr_tested_frame = 0 self.regression_failed = False + # -------------------------------------------------- # Load reference files # -------------------------------------------------- - try: - for meca_id in range(nbr_meca): - meta, rows = reference_io.read_CSV_reference_file(self.filenames[meca_id]) - - dof_per_point = int(meta["dof_per_point"]) - n_points = int(meta["num_points"]) - - times = [] - values = [] - - for row in rows: - t = float(row[0]) - flat = np.asarray(row[1:], dtype=float) - - expected_size = n_points * dof_per_point - if flat.size != expected_size: - print( - f"Reference size mismatch for file {self.file_scene_path}, " - f"MechanicalObject {meca_id}: " - f"expected {expected_size}, got {flat.size}" - ) - return False - - values.append(flat.reshape((n_points, dof_per_point))) - times.append(t) - - # Keep timeline from first MechanicalObject - if meca_id == 0: - ref_times = times - else: - if len(times) != len(ref_times): - print( - f"Reference timeline mismatch for file {self.file_scene_path}, " - f"MechanicalObject {meca_id}" - ) - return False + for meca_id in range(nbr_meca): + try: + if format == "CSV": + meta, rows = reference_io.read_CSV_reference_file(self.filenames[meca_id]) + + dof_per_point = int(meta["dof_per_point"]) + n_points = int(meta["num_points"]) + + times = [] + values = [] + + for row in rows: + t = float(row[0]) + flat = np.asarray(row[1:], dtype=float) + + expected_size = n_points * dof_per_point + if flat.size != expected_size: + print( + f"Reference size mismatch for file {self.file_scene_path}, " + f"MechanicalObject {meca_id}: " + f"expected {expected_size}, got {flat.size}" + ) + return False + + values.append(flat.reshape((n_points, dof_per_point))) + times.append(t) + + ref_values.append(values) + + # Keep timeline from first 
MechanicalObject + if meca_id == 0: + keyframes = times + else: + if len(times) != len(keyframes): + print( + f"Reference timeline mismatch for file {self.file_scene_path}, " + f"MechanicalObject {meca_id}" + ) + return False + + elif format == "JSON": + decoded_array, decoded_keyframes = reference_io.read_JSON_reference_file(self.filenames[meca_id]) + numpy_data.append(decoded_array) + + # Keep timeline from first MechanicalObject + if meca_id == 0: + keyframes = decoded_keyframes - ref_values.append(values) self.total_error.append(0.0) self.error_by_dof.append(0.0) @@ -294,17 +318,20 @@ def compare_csv_references(self): # Simulation + comparison # -------------------------------------------------- frame_step = 0 - nbr_frames = len(ref_times) + nbr_frames = len(keyframes) dt = self.root_node.dt.value - for step in range(0, self.steps + 1): simu_time = dt * step # Use tolerance for float comparison - if frame_step < nbr_frames and np.isclose(simu_time, ref_times[frame_step]): + if frame_step < nbr_frames and np.isclose(simu_time, keyframes[frame_step]): for meca_id in range(nbr_meca): meca_dofs = np.copy(self.meca_objs[meca_id].position.value) - data_ref = ref_values[meca_id][frame_step] + + if format == "CSV": + data_ref = ref_values[meca_id][frame_step] + elif format == "JSON": + data_ref = np.asarray(numpy_data[meca_id][str(keyframes[frame_step])]) if meca_dofs.shape != data_ref.shape: print( @@ -350,76 +377,6 @@ def compare_csv_references(self): return False return True - - def compare_json_references(self): - pbar_simu = tqdm(total=float(self.steps), disable=self.disable_progress_bar) - pbar_simu.set_description("compare_references: " + self.file_scene_path) - - nbr_meca = len(self.meca_objs) - numpy_data = [] # List - keyframes = [] - self.total_error = [] - self.error_by_dof = [] - - for meca_id in range(0, nbr_meca): - try: - decoded_array, decoded_keyframes = reference_io.read_JSON_reference_file(self.filenames[meca_id]) - numpy_data.append(decoded_array) 
- - if meca_id == 0: - keyframes = decoded_keyframes - - self.total_error.append(0.0) - self.error_by_dof.append(0.0) - except FileNotFoundError as e: - print(f'Error while reading references: {str(e)}') - return False - - - frame_step = 0 - nbr_frames = len(keyframes) - self.nbr_tested_frame = 0 - for step in range(0, self.steps + 1): - simu_time = self.root_node.dt.value * step - - if simu_time == keyframes[frame_step]: - for meca_id in range(0, nbr_meca): - meca_dofs = np.copy(self.meca_objs[meca_id].position.value) - data_ref = np.asarray(numpy_data[meca_id][str(keyframes[frame_step])]) - if (meca_dofs.size != data_ref.size): - print(f'Error while reading reference for file {self.file_scene_path} at mechanicalObject id: {str(meca_id)}. Reference size: {data_ref.size} vs current size: {meca_dofs.size}') - return False - - data_diff = data_ref - meca_dofs - - # Compute total distance between the 2 sets - full_dist = np.linalg.norm(data_diff) - error_by_dof = full_dist / float(data_diff.size) - - if self.verbose: - print (str(step) + "| " + self.meca_objs[meca_id].name.value + " | full_dist: " + str(full_dist) + " | error_by_dof: " + str(error_by_dof) + " | nbrDofs: " + str(data_ref.size)) - - self.total_error[meca_id] = self.total_error[meca_id] + full_dist - self.error_by_dof[meca_id] = self.error_by_dof[meca_id] + error_by_dof - - frame_step = frame_step + 1 - self.nbr_tested_frame = self.nbr_tested_frame + 1 - - # security exit if simulation steps exceed nbr_frames - if frame_step == nbr_frames: - break - - Sofa.Simulation.animate(self.root_node, self.root_node.dt.value) - - pbar_simu.update(1) - pbar_simu.close() - - for meca_id in range(0, nbr_meca): - if self.total_error[meca_id] > self.epsilon: - self.regression_failed = True - return False - - return True def replay_references(self): diff --git a/tools/RegressionSceneList.py b/tools/RegressionSceneList.py index 36fa5b2..4dff0a7 100644 --- a/tools/RegressionSceneList.py +++ b/tools/RegressionSceneList.py 
@@ -124,7 +124,7 @@ def compare_references(self, id_scene): self.nbr_errors = self.nbr_errors + 1 print(f'Error while trying to load: {str(e)}') else: - result = self.scenes_data_sets[id_scene].compare_csv_references() + result = self.scenes_data_sets[id_scene].compare_references() if not result: self.nbr_errors = self.nbr_errors + 1 From 0fc38f234c66ae17219329a9ac9ec9b6cdbd7a46 Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 15:19:29 +0100 Subject: [PATCH 06/11] Add support of both filename creation with different extension --- tools/RegressionSceneData.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 7ceb994..1b69979 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -149,7 +149,7 @@ def add_write_state(self): counter = counter+1 - def load_scene(self): + def load_scene(self, format = "JSON"): self.root_node = Sofa.Simulation.load(self.file_scene_path) if not self.root_node: # error while loading print(f'Error while trying to load {self.file_scene_path}') @@ -161,7 +161,10 @@ def load_scene(self): self.parse_node(self.root_node, 0) counter = 0 for mecaObj in self.meca_objs: - _filename = self.file_ref_path + ".reference_mstate_" + str(counter) + "_" + mecaObj.name.value + ".csv.gz" + if format == "CSV": + _filename = self.file_ref_path + ".reference_mstate_" + str(counter) + "_" + mecaObj.name.value + ".csv.gz" + elif format == "JSON": + _filename = self.file_ref_path + ".reference_mstate_" + str(counter) + "_" + mecaObj.name.value + ".json.gz" self.filenames.append(_filename) counter = counter+1 From 85c65068fed0a57ed4620c1bbdc4108668eb22e4 Mon Sep 17 00:00:00 2001 From: epernod Date: Tue, 20 Jan 2026 01:01:48 +0100 Subject: [PATCH 07/11] Add regression version in case we change the format later --- tools/ReferenceFileIO.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py 
index f229193..83ee769 100644 --- a/tools/ReferenceFileIO.py +++ b/tools/ReferenceFileIO.py @@ -3,6 +3,7 @@ import json from json import JSONEncoder +regression_version = "1.0" class NumpyArrayEncoder(JSONEncoder): def default(self, obj): @@ -45,6 +46,7 @@ def read_CSV_reference_file(file_path): def write_CSV_reference_file(file_path, dof_per_point, num_points, csv_rows): with gzip.open(file_path, "wt", newline="") as f: writer = csv.writer(f) + f.write(f"# format_version={regression_version}\n") f.write(f"# dof_per_point={dof_per_point}\n") f.write(f"# num_points={num_points}\n") From f005934613e93b8b440cfdd1249758290f6e920d Mon Sep 17 00:00:00 2001 From: erik pernod Date: Wed, 11 Feb 2026 16:18:28 +0100 Subject: [PATCH 08/11] Fix indentation from bad merge --- tools/RegressionSceneData.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 1b69979..29947d2 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -224,7 +224,7 @@ def write_references(self, format = "JSON"): elif format == "JSON": reference_io.write_JSON_reference_file(self.filenames[meca_id], numpy_data[meca_id]) - Sofa.Simulation.unload(self.root_node) + Sofa.Simulation.unload(self.root_node) def compare_references(self, format = "JSON"): From 6229e1243370bbe368bc6117a3e5e2fd7bd3103c Mon Sep 17 00:00:00 2001 From: erik pernod Date: Wed, 11 Feb 2026 16:18:58 +0100 Subject: [PATCH 09/11] Fix missing newline at end of ReferenceFileIO.py Ensure newline at end of file for ReferenceFileIO.py --- tools/ReferenceFileIO.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py index 83ee769..a652974 100644 --- a/tools/ReferenceFileIO.py +++ b/tools/ReferenceFileIO.py @@ -74,4 +74,4 @@ def read_JSON_reference_file(file_path): for key in decoded_array: keyframes.append(float(key)) - return decoded_array, keyframes \ No newline at end of file + 
return decoded_array, keyframes From 67e62bf57cfb8ebab4d60cc71f1364cda4592cb0 Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 17:20:08 +0100 Subject: [PATCH 10/11] forgot import numpy in new file --- tools/ReferenceFileIO.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py index a652974..dcfefdc 100644 --- a/tools/ReferenceFileIO.py +++ b/tools/ReferenceFileIO.py @@ -2,6 +2,7 @@ import csv import json from json import JSONEncoder +import numpy as np regression_version = "1.0" From 945db9d17fbe06bd658588fc7e1e7540bfe9e23a Mon Sep 17 00:00:00 2001 From: epernod Date: Wed, 11 Feb 2026 17:41:24 +0100 Subject: [PATCH 11/11] Fix forgotten code during the factorization --- tools/ReferenceFileIO.py | 2 +- tools/RegressionSceneData.py | 13 +++---------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/tools/ReferenceFileIO.py b/tools/ReferenceFileIO.py index dcfefdc..6342a63 100644 --- a/tools/ReferenceFileIO.py +++ b/tools/ReferenceFileIO.py @@ -68,7 +68,7 @@ def write_JSON_reference_file(file_path, numpy_data): write_file.write(json.dumps(numpy_data, cls=NumpyArrayEncoder).encode('utf-8')) def read_JSON_reference_file(file_path): - with gzip.open(self.filenames[meca_id], 'r') as zipfile: + with gzip.open(file_path, 'r') as zipfile: decoded_array = json.loads(zipfile.read().decode('utf-8')) keyframes = [] diff --git a/tools/RegressionSceneData.py b/tools/RegressionSceneData.py index 29947d2..d4a9ae6 100644 --- a/tools/RegressionSceneData.py +++ b/tools/RegressionSceneData.py @@ -228,14 +228,6 @@ def write_references(self, format = "JSON"): def compare_references(self, format = "JSON"): - if format == "CSV": - return self.compare_csv_references() - elif format == "JSON": - return self.compare_json_references() - else: - print(f"Unsupported format: {format}") - raise ValueError(f"Unsupported format: {format}") - pbar_simu = tqdm(total=float(self.steps), disable=self.disable_progress_bar) 
pbar_simu.set_description("compare_references: " + self.file_scene_path) @@ -247,7 +239,9 @@ def compare_references(self, format = "JSON"): ref_values = [] # List[List[np.ndarray]] elif format == "JSON": numpy_data = [] # List - + else: + print(f"Unsupported format: {format}") + raise ValueError(f"Unsupported format: {format}") # Outputs init self.total_error = [] @@ -255,7 +249,6 @@ def compare_references(self, format = "JSON"): self.nbr_tested_frame = 0 self.regression_failed = False - # -------------------------------------------------- # Load reference files # --------------------------------------------------