Coverage for / dolfinx-env / lib / python3.12 / site-packages / io4dolfinx / backends / exodus / backend.py: 0%
236 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-02-26 18:16 +0000
« prev ^ index » next coverage.py v7.13.4, created at 2026-02-26 18:16 +0000
1"""
2Exodus interface to io4dolfinx.
3The Exodus2 format is described in:
4https://src.fedoraproject.org/repo/pkgs/exodusii/922137.pdf/a45d67f4a1a8762bcf66af2ec6eb35f9/922137.pdf
5Further documentation from CUBIT on node numbering can be found at:
6https://coreform.com/cubit_help/appendix/element_numbering.htm
8SPDX License identifier: MIT
10Copyright: Jørgen S. Dokken, Henrik N.T. Finsberg, Remi Delaporte-Mathurin,
11 and Simula Research Laboratory
12"""
14from pathlib import Path
15from typing import Any, Literal, cast
17from mpi4py import MPI
19import basix.ufl
20import dolfinx
21import netCDF4
22import numpy as np
23import numpy.typing as npt
25from ...structures import ArrayData, FunctionData, MeshData, MeshTagsData, ReadMeshData
26from ...utils import check_file_exists
27from .. import FileMode, ReadMode
# Maps from the Exodus side-set "side" number (shifted to a zero base) to the
# local vertex indices of that facet, one table per cell type.
_interval_to_vertex_map = {0: [0, 1]}
# Based on: https://src.fedoraproject.org/repo/pkgs/exodusii/922137.pdf/a45d67f4a1a8762bcf66af2ec6eb35f9/922137.pdf
_tetra_facet_to_vertex_map = {0: [0, 1, 3], 1: [1, 2, 3], 2: [0, 2, 3], 3: [0, 1, 2]}
# https://coreform.com/cubit_help/appendix/element_numbering.htm
# Note that triangular side-sets goes from 2 to 4 (with 0 base index)
_triangle_to_vertex_map = {2: [0, 1], 3: [1, 2], 4: [2, 0]}
_quad_to_vertex_map = {0: [0, 1], 1: [1, 2], 2: [2, 3], 3: [3, 0]}
_hex_to_vertex_map = {
    0: [0, 1, 4, 5],
    1: [1, 2, 5, 6],
    2: [2, 3, 6, 7],
    3: [0, 3, 4, 7],
    4: [0, 1, 2, 3],
    5: [4, 5, 6, 7],
}

# Dispatch table: DOLFINx cell-type name -> side-to-vertex table above.
# Used when converting Exodus side-sets into facet mesh tags.
_side_set_to_vertex_map = {
    "interval": _interval_to_vertex_map,
    "quadrilateral": _quad_to_vertex_map,
    "triangle": _triangle_to_vertex_map,
    "tetrahedron": _tetra_facet_to_vertex_map,
    "hexahedron": _hex_to_vertex_map,
}
# Exodus element-type string -> (DOLFINx cell-type name, coordinate degree).
# Unsuffixed names ("QUAD", "HEX", "TETRA") are treated as first order.
_exodus_to_string = {
    "EDGE2": ("interval", 1),
    "TRI3": ("triangle", 1),
    "QUAD": ("quadrilateral", 1),
    "QUAD4": ("quadrilateral", 1),
    "TETRA": ("tetrahedron", 1),
    "HEX": ("hexahedron", 1),
    "HEX8": ("hexahedron", 1),
    "BEAM2": ("interval", 1),
    "HEX27": ("hexahedron", 2),
}

# This backend reads the file on rank 0 only and broadcasts metadata
# (see read_mesh_data / read_meshtags_data).
read_mode = ReadMode.serial
def _get_cell_type(connectivity: netCDF4.Variable) -> tuple[dolfinx.mesh.CellType, int]:
    """Translate an Exodus element block into a DOLFINx cell type and degree.

    The block's ``elem_type`` attribute (e.g. ``"HEX27"``) is looked up in the
    Exodus-to-DOLFINx table; unknown element types raise ``KeyError``.
    """
    name, order = _exodus_to_string[connectivity.elem_type]
    return dolfinx.mesh.to_type(name), order
def _compute_tdim(connectivity: netCDF4.Variable) -> int:
    """Return the topological dimension of the cells in an Exodus element block."""
    cell, _degree = _get_cell_type(connectivity)
    return dolfinx.mesh.cell_dim(cell)
79def get_default_backend_args(arguments: dict[str, Any] | None) -> dict[str, Any]:
80 args = arguments or {}
81 return args
def convert_file_mode(mode: FileMode) -> str:
    """Translate an io4dolfinx ``FileMode`` into a netCDF4 mode string.

    Raises:
        NotImplementedError: If the mode has no netCDF4 equivalent.
    """
    translation = {
        FileMode.append: "a",
        FileMode.read: "r",
        FileMode.write: "w",
    }
    try:
        return translation[mode]
    except KeyError:
        raise NotImplementedError(f"File mode {mode} not implemented") from None
def write_attributes(
    filename: Path | str,
    comm: MPI.Intracomm,
    name: str,
    attributes: dict[str, np.ndarray],
    backend_args: dict[str, Any] | None = None,
):
    """Write attributes to file.

    Args:
        filename: Path to file to write to
        comm: MPI communicator used in storage
        name: Name of the attributes
        attributes: Dictionary of attributes to write to file
        backend_args: Arguments to backend

    Raises:
        NotImplementedError: Always; the Exodus backend cannot write attributes.
    """
    raise NotImplementedError("The Exodus backend cannot write attributes.")
def read_attributes(
    filename: Path | str,
    comm: MPI.Intracomm,
    name: str,
    backend_args: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Read attributes from file.

    Args:
        filename: Path to file to read from
        comm: MPI communicator used in storage
        name: Name of the attributes
        backend_args: Arguments to backend

    Returns:
        The attributes

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read attributes.
    """
    raise NotImplementedError("The Exodus backend cannot read attributes.")
def snapshot_checkpoint(
    filename: Path | str,
    mode: FileMode,
    u: dolfinx.fem.Function,
    backend_args: dict[str, Any] | None,
):
    """Create a snapshot checkpoint of a dolfinx function.

    Args:
        filename: Path to file to read from
        mode: File-mode to store the function
        u: dolfinx function to create a snapshot checkpoint for
        backend_args: Arguments to backend

    Raises:
        NotImplementedError: Always; the Exodus backend cannot make checkpoints.
    """
    raise NotImplementedError("The EXODUS backend cannot make checkpoints.")
def read_hdf5_array(
    comm: MPI.Intracomm,
    filename: Path | str,
    group: str,
    backend_args: dict[str, Any] | None,
) -> tuple[np.ndarray, int]:
    """Read an array from an HDF5 file.

    Args:
        comm: MPI communicator used in storage
        filename: Path to file to read from
        group: Group in HDF5 file where array is stored
        backend_args: Arguments to backend

    Returns:
        Tuple containing:
        - Numpy array read from file
        - Global starting point on the process.
          Process 0 has [0, M), process 1 [M, N), process 2 [N, O) etc.

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read HDF5 arrays.
    """
    raise NotImplementedError("The EXODUS backend cannot read HDF5 arrays")
def read_timestamps(
    filename: Path | str,
    comm: MPI.Intracomm,
    function_name: str,
    backend_args: dict[str, Any] | None = None,
) -> npt.NDArray[np.float64 | str]:  # type: ignore[type-var]
    """Read time-stamps from a checkpoint file.

    Args:
        filename: Path to file
        comm: MPI communicator
        function_name: Name of the function to read time-stamps for
        backend_args: Arguments for backend, for instance file type.

    Returns:
        The time-stamps

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read timestamps.
    """
    raise NotImplementedError("The Exodus backend cannot read timestamps.")
def write_mesh(
    filename: Path | str,
    comm: MPI.Intracomm,
    mesh: MeshData,
    backend_args: dict[str, Any] | None = None,
    mode: FileMode = FileMode.write,
    time: float = 0.0,
):
    """Write a mesh to file.

    Args:
        filename: Path to file to write to.
        comm: MPI communicator used in storage
        mesh: Internal data structure for the mesh data to save to file
        backend_args: Arguments to backend
        mode: Mode to use (write or append)
        time: Time stamp

    Raises:
        NotImplementedError: Always; the Exodus backend cannot write meshes.
    """
    raise NotImplementedError("The Exodus backend cannot write meshes.")
def _read_mesh_geometry(infile: netCDF4.Dataset) -> tuple[int, npt.NDArray[np.floating]]:
    """Extract the geometric dimension and node coordinates from an Exodus file.

    Exodus files store coordinates either in one packed ``coord`` variable or
    split per axis into ``coordx``/``coordy``/``coordz`` (see p. 171 of the
    Exodus II manual); both layouts are handled.
    """
    node_count = infile.dimensions["num_nodes"].size
    gdim = infile.dimensions["num_dim"].size

    packed = infile.variables.get("coord")
    if packed is not None:
        # Single variable holding all coordinate components.
        return gdim, np.asarray(packed)

    # Per-axis layout: assemble the coordinate array one column at a time,
    # skipping axes that are absent (e.g. no "coordz" for a 2D mesh).
    coordinates = np.zeros((node_count, gdim), dtype=np.float64)
    for axis, label in enumerate("xyz"):
        component = infile.variables.get(f"coord{label}")
        if component is not None:
            coordinates[: component.size, axis] = component[:]
    return gdim, coordinates
def _get_entity_blocks(
    infile: netCDF4.Dataset, search_type: Literal["cell", "facet"]
) -> tuple[int, list[netCDF4.Variable]]:
    """Collect the connectivity blocks matching the requested entity kind.

    The mesh's topological dimension is taken as the largest dimension among
    all element blocks (see p. 171 of the Exodus II manual); ``"cell"`` selects
    blocks of that dimension, ``"facet"`` blocks of one dimension lower.

    Returns:
        The searched dimension and the connectivity variables with that dimension.
    """
    block_count = infile.dimensions["num_el_blk"].size
    blocks = [infile.variables[f"connect{i + 1}"] for i in range(block_count)]

    # Highest topological dimension present among the element blocks.
    max_tdim = max(_compute_tdim(block) for block in blocks)

    if search_type == "cell":
        search_dim = max_tdim
    elif search_type == "facet":
        search_dim = max_tdim - 1
    else:
        raise RuntimeError(f"Unknown entity type: {search_type}")

    matching = [block for block in blocks if _compute_tdim(block) == search_dim]
    return search_dim, matching
def _extract_connectivity_data(
    entity_blocks: list[netCDF4.Variable],
) -> tuple[list[npt.NDArray[np.int64]], tuple[dolfinx.mesh.CellType, int], list[int]]:
    """Pull zero-based connectivity arrays and block metadata from Exodus blocks.

    Exodus numbers nodes from one, so every connectivity array is shifted down
    by one. All blocks must share a single cell type.

    Returns:
        The zero-based connectivity of each block, the common
        (cell type, degree) pair, and each block's zero-based index derived
        from its ``connectN`` variable name.
    """
    arrays = [block[:] - 1 for block in entity_blocks]
    types = [_get_cell_type(block) for block in entity_blocks]
    block_indices = [int(block.name.removeprefix("connect")) - 1 for block in entity_blocks]

    for candidate in types:
        assert types[0] == candidate, "Mixed cell types not supported"
    return arrays, types[0], block_indices
def read_mesh_data(
    filename: Path | str,
    comm: MPI.Intracomm,
    time: str | float | None = 0.0,
    read_from_partition: bool = False,
    backend_args: dict[str, Any] | None = None,
) -> ReadMeshData:
    """Read mesh data from EXODUS based checkpoint files.

    Only rank 0 reads the file; the metadata needed to size empty arrays on
    the remaining ranks (cell type, geometric dimension, coordinate dtype,
    degree and dofs per cell) is broadcast from rank 0.

    Args:
        filename: Path to input file
        comm: The MPI communicator to distribute the mesh over
        time: Time stamp associated with mesh
        read_from_partition: Read mesh with partition from file
        backend_args: Arguments to backend

    Returns:
        The mesh topology, geometry, UFL domain and partition function

    Raises:
        ValueError: If the file contains no cell blocks.
        NotImplementedError: If the cell type/degree combination is unsupported.
    """
    check_file_exists(filename)
    with netCDF4.Dataset(filename, "r") as infile:
        if comm.rank == 0:
            gdim, coordinates = _read_mesh_geometry(infile)

            _tdim, entity_blocks = _get_entity_blocks(infile, "cell")
            if len(entity_blocks) > 0:
                # Extract markers directly from entity-blocks
                connectivity_arrays, (cell_type, degree), _entity_block_index = (
                    _extract_connectivity_data(entity_blocks)
                )

                cells = np.vstack(connectivity_arrays)
                if isinstance(cells, np.ma.MaskedArray):
                    cells = cells.filled()
            else:
                # Fix: interpolate the offending file name into the message.
                raise ValueError(f"No blocks found in {filename}")

            if degree == 1:
                # First-order elements follow the VTK node ordering.
                perm = dolfinx.cpp.io.perm_vtk(cell_type, cells.shape[1])
            elif cell_type == dolfinx.mesh.CellType.hexahedron and degree == 2:
                # Ordering from Fig 4.14 of: https://sandialabs.github.io/seacas-docs/exodusII-new.pdf
                dolfinx_to_exodus = np.array(
                    [
                        0,
                        1,
                        5,
                        4,
                        2,
                        3,
                        7,
                        6,
                        8,
                        12,
                        16,
                        10,
                        9,
                        11,
                        18,
                        17,
                        13,
                        15,
                        19,
                        14,
                        26,
                        21,
                        24,
                        22,
                        23,
                        20,
                        25,
                    ]
                )
                perm = np.argsort(dolfinx_to_exodus)
            else:
                # Fix: the message was missing its f-prefix, so the
                # placeholders were never interpolated.
                raise NotImplementedError(
                    f"Reading Exodus2 mesh with {cell_type} of order {degree} is not supported."
                )

            cells = cells[:, perm]
            cell_type, gdim, xtype, degree, num_dofs_per_cell = comm.bcast(
                (cell_type, gdim, np.dtype(coordinates.dtype).name, degree, cells.shape[1]), root=0
            )

        else:
            # The payload argument is ignored on non-root ranks; keep a
            # 5-tuple anyway so it mirrors the values actually received
            # (previously only 4 placeholders were listed).
            cell_type, gdim, xtype, degree, num_dofs_per_cell = comm.bcast(
                (None, None, None, None, None), root=0
            )
            coordinates = np.zeros((0, gdim), dtype=xtype)
            cells = np.zeros((0, num_dofs_per_cell), dtype=np.int64)
    return ReadMeshData(
        cells=cells,
        cell_type=dolfinx.mesh.to_string(cell_type),
        x=coordinates,
        lvar=int(basix.LagrangeVariant.equispaced),
        degree=degree,
        partition_graph=None,
    )
def write_meshtags(
    filename: str | Path,
    comm: MPI.Intracomm,
    data: MeshTagsData,
    backend_args: dict[str, Any] | None = None,
):
    """Write mesh tags to file.

    Args:
        filename: Path to file to write to
        comm: MPI communicator used in storage
        data: Internal data structure for the mesh tags to save to file
        backend_args: Arguments to backend

    Raises:
        NotImplementedError: Always; the Exodus backend cannot write meshtags.
    """
    raise NotImplementedError("The Exodus backend cannot write meshtags.")
def read_meshtags_data(
    filename: str | Path,
    comm: MPI.Intracomm,
    name: str,
    backend_args: dict[str, Any] | None = None,
) -> MeshTagsData:
    """Read mesh tags from file.

    Only rank 0 reads the file. Tags are taken from element blocks when blocks
    of the requested dimension exist; otherwise facet tags are reconstructed
    from Exodus side-sets. The tag dimension and array metadata are broadcast
    so the remaining ranks can build empty arrays.

    Args:
        filename: Path to file to read from
        comm: MPI communicator used in storage
        name: Name of the mesh tags to read; must be "cell" or "facet"
        backend_args: Arguments to backend.

    Returns:
        Internal data structure for the mesh tags read from file

    Raises:
        RuntimeError: If ``name`` is neither "cell" nor "facet".
    """
    if comm.rank == 0:
        with netCDF4.Dataset(filename, "r") as infile:
            # Compute max topological dimension in mesh and find the correct
            if name == "cell" or name == "facet":
                search_dim, entity_blocks = _get_entity_blocks(
                    infile, cast(Literal["cell", "facet"], name)
                )
            else:
                # Fix: the message was missing its f-prefix, so {name} was
                # never interpolated.
                raise RuntimeError(f"Expected name='cell' or 'facet' got {name}")

            if len(entity_blocks) > 0:
                # Extract markers directly from entity-blocks
                connectivity_arrays, (cell_type, degree), entity_block_index = (
                    _extract_connectivity_data(entity_blocks)
                )
                marked_entities = np.vstack(connectivity_arrays)
                entity_values = np.zeros(marked_entities.shape[0], dtype=np.int64)
                if "eb_prop1" in infile.variables.keys():
                    block_values = infile.variables["eb_prop1"][:]

                    # First check if entities are in eb_prop1
                    insert_offset = np.zeros(len(connectivity_arrays) + 1, dtype=np.int64)
                    insert_offset[1:] = np.cumsum([c_arr.shape[0] for c_arr in connectivity_arrays])
                    for i, index in enumerate(entity_block_index):
                        entity_values[insert_offset[i] : insert_offset[i + 1]] = block_values[index]
                else:
                    num_dofs_per_cell = basix.ufl.element(
                        "Lagrange", dolfinx.mesh.to_string(cell_type), degree
                    ).dim
                    assert num_dofs_per_cell == marked_entities.shape[1]
                    # No block values available: send empty tag data.
                    marked_entities = np.zeros((0, marked_entities.shape[1]), dtype=np.int64)
                    entity_values = np.zeros(0, dtype=np.int64)
            elif name == "facet" and "ss_prop1" in infile.variables.keys():
                # If we haven't found the cell type as a block, we should be extracting facets
                # (from side-sets), then we need the parent cell
                tdim, entity_blocks = _get_entity_blocks(infile, "cell")
                cell_types = []
                for entity_block in entity_blocks:
                    cell_types.append(_get_cell_type(entity_block))
                for cell in cell_types:
                    assert cell_types[0] == cell, "Mixed cell types not supported"
                cell_type, degree = cell_types[0]
                local_facet_index = _side_set_to_vertex_map[dolfinx.mesh.to_string(cell_type)]
                if "num_side_sets" not in infile.dimensions:
                    facet_type = dolfinx.cpp.mesh.cell_entity_type(cell_type, tdim - 1, 0)
                    num_dofs_per_cell = basix.ufl.element(
                        "Lagrange", dolfinx.mesh.to_string(facet_type), degree
                    ).dim
                    marked_entities = np.zeros((0, num_dofs_per_cell), dtype=np.int64)
                    entity_values = np.zeros(0, dtype=np.int64)
                else:
                    # Extract facet values
                    local_facet_index = _side_set_to_vertex_map[dolfinx.mesh.to_string(cell_type)]
                    num_facet_sets = infile.dimensions["num_side_sets"].size
                    values = infile.variables["ss_prop1"]
                    # Extract all cell blocks to get correct look-up
                    connectivity_arrays = []
                    for entity_block in entity_blocks:
                        connectivity_arrays.append(entity_block[:] - 1)
                    connectivity_array = np.vstack(connectivity_arrays)

                    # Loop through all side sets to extract the correct connectivity
                    facet_indices = []
                    facet_values = []
                    for i in range(num_facet_sets):
                        value = values[i].reshape(-1)
                        elements = infile.variables[f"elem_ss{i + 1}"]
                        local_facets = infile.variables[f"side_ss{i + 1}"]
                        for element, index in zip(elements, local_facets):
                            # Exodus element/side numbering is one-based.
                            facet_indices.append(
                                connectivity_array[element - 1, local_facet_index[index - 1]]
                            )
                            facet_values.append(value.data.tolist())
                    marked_entities = np.vstack(facet_indices)
                    entity_values = np.array(facet_values, dtype=np.int64).flatten()
            else:
                # If we found no blocks (for instance for facets, we search through the cells)
                tdim, entity_blocks = _get_entity_blocks(infile, "cell")
                search_dim = tdim - 1 if name == "facet" else tdim
                cell_type, degree = _get_cell_type(entity_blocks[0])
                facet_type = dolfinx.cpp.mesh.cell_entity_type(cell_type, search_dim, 0)
                num_dofs_per_cell = basix.ufl.element(
                    "Lagrange", dolfinx.mesh.to_string(facet_type), degree
                ).dim
                # If we cannot find any information about the blocks we send nothing
                marked_entities = np.zeros((0, num_dofs_per_cell), dtype=np.int64)
                entity_values = np.zeros(0, dtype=np.int64)
        # Broadcast information read by this process to other processes
        (dim, _, _) = comm.bcast(
            (search_dim, marked_entities.shape[1], np.dtype(entity_values.dtype).name),
            root=0,
        )

    else:
        # Other process gets info from process that read the file
        dim, num_dofs_per_cell, vtype = comm.bcast((None, None, None), root=0)
        marked_entities = np.zeros((0, num_dofs_per_cell), dtype=np.int64)
        entity_values = np.zeros(0, dtype=vtype)
    return MeshTagsData(name=name, values=entity_values, indices=marked_entities, dim=dim)
def read_dofmap(
    filename: str | Path,
    comm: MPI.Intracomm,
    name: str,
    backend_args: dict[str, Any] | None,
) -> dolfinx.graph.AdjacencyList:
    """Read the dofmap of a function with a given name.

    Args:
        filename: Path to file to read from
        comm: MPI communicator used in storage
        name: Name of the function to read the dofmap for
        backend_args: Arguments to backend

    Returns:
        Dofmap as an {py:class}`dolfinx.graph.AdjacencyList`

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read dofmaps.
    """
    raise NotImplementedError("The Exodus backend cannot read dofmap.")
def read_dofs(
    filename: str | Path,
    comm: MPI.Intracomm,
    name: str,
    time: float,
    backend_args: dict[str, Any] | None,
) -> tuple[npt.NDArray[np.float32 | np.float64 | np.complex64 | np.complex128], int]:
    """Read the dofs (values) of a function with a given name from a given timestep.

    Args:
        filename: Path to file to read from
        comm: MPI communicator used in storage
        name: Name of the function to read the dofs for
        time: Time stamp associated with the function to read
        backend_args: Arguments to backend

    Returns:
        Contiguous sequence of degrees of freedom (with respect to input data)
        and the global starting point on the process.
        Process 0 has [0, M), process 1 [M, N), process 2 [N, O) etc.

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read dofs.
    """
    raise NotImplementedError("The Exodus backend cannot read dofs.")
def read_cell_perms(
    comm: MPI.Intracomm, filename: Path | str, backend_args: dict[str, Any] | None
) -> npt.NDArray[np.uint32]:
    """
    Read cell permutation from file with given communicator,
    Split in continuous chunks based on number of cells in the input data.

    Args:
        comm: MPI communicator used in storage
        filename: Path to file to read from
        backend_args: Arguments to backend

    Returns:
        Contiguous sequence of permutations (with respect to input data)
        Process 0 has [0, M), process 1 [M, N), process 2 [N, O) etc.

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read cell permutations.
    """
    raise NotImplementedError("The Exodus backend cannot read cell perms.")
def write_function(
    filename: str | Path,
    comm: MPI.Intracomm,
    u: FunctionData,
    time: float,
    mode: FileMode,
    backend_args: dict[str, Any] | None = None,
):
    """Write a function to file.

    Args:
        filename: Path to file to write to
        comm: MPI communicator used in storage
        u: Internal data structure for the function data to save to file
        time: Time stamp associated with function
        mode: File-mode to store the function
        backend_args: Arguments to backend

    Raises:
        NotImplementedError: Always; the Exodus backend cannot write functions.
    """
    raise NotImplementedError("The Exodus backend cannot write function.")
def read_legacy_mesh(
    filename: Path | str, comm: MPI.Intracomm, group: str
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.floating], str | None]:
    """Read in the mesh topology, geometry and (optionally) cell type from a
    legacy DOLFIN HDF5-file.

    Args:
        filename: Path to file to read from
        comm: MPI communicator used in storage
        group: Group in HDF5 file where mesh is stored

    Returns:
        Tuple containing:
        - Topology as a (num_cells, num_vertices_per_cell) array of global vertex indices
        - Geometry as a (num_vertices, geometric_dimension) array of vertex coordinates
        - Cell type as a string (e.g. "tetrahedron") or None if not found

    Raises:
        NotImplementedError: Always; the Exodus backend cannot read legacy meshes.
    """
    raise NotImplementedError("The Exodus backend cannot read legacy mesh.")
def read_point_data(
    filename: Path | str,
    name: str,
    comm: MPI.Intracomm,
    time: float | str | None,
    backend_args: dict[str, Any] | None,
) -> tuple[np.ndarray, int]:
    """Read data from the nodes of a mesh.

    Only rank 0 reads the file; the other ranks receive an empty array with
    the correct number of components.

    Args:
        filename: Path to file
        name: Name of point data
        comm: Communicator to launch IO on.
        time: The time stamp; ``None`` selects the first stored time step.
        backend_args: The backend arguments

    Returns:
        Data local to process (contiguous, no mpi comm) and local start range

    Raises:
        ValueError: If the variable name or requested time step is not in the file.
    """
    num_components = 1  # Default assumption, overridden by data read in having multiple components
    if comm.rank == 0:
        with netCDF4.Dataset(filename, "r") as infile:
            raw_names = infile.variables["name_nod_var"][:].data
            node_names = netCDF4.chartostring(raw_names)
            if name not in node_names:
                raise ValueError(
                    f"Point data with name {name} not found in file.",
                    f"Available variables: {node_names}",
                )
            # Exodus nodal-variable arrays are numbered from 1.
            index = np.flatnonzero(name == node_names)[0] + 1

            temporal_dataset = infile.variables[f"vals_nod_var{index}"]
            time_steps = infile.variables["time_whole"][:].data
            if time is None:
                # Fix: previously the first time *value* (time_steps[0]) was
                # used as the index; default to index 0 (the first step).
                time_idx = 0
            else:
                time_indices = np.flatnonzero(np.isclose(time_steps, time))
                if len(time_indices) == 0:
                    raise ValueError(
                        f"Could not find {name}(t={time}), available time steps are {time_steps}"
                    )
                time_idx = time_indices[0]

            dataset = temporal_dataset[time_idx]
            if len(dataset.shape) == 1:
                dataset = dataset.reshape(-1, num_components)
            else:
                num_components = dataset.shape[1]
    # Broadcast num components to all other ranks
    num_components = comm.bcast(num_components, root=0)
    # Zero data on all other processes
    if comm.rank != 0:
        dataset = np.zeros((0, num_components), dtype=np.float64)
    return dataset, 0
def read_cell_data(
    filename: Path | str,
    name: str,
    comm: MPI.Intracomm,
    time: str | float | None,
    backend_args: dict[str, Any] | None,
) -> tuple[npt.NDArray[np.int64], np.ndarray]:
    """Read data from the cells of a mesh.

    Only rank 0 reads the cell values; the other ranks receive an empty array
    with the correct number of components. The topology is re-read via
    :func:`read_mesh_data` on all ranks.

    Args:
        filename: Path to file
        name: Name of cell data
        comm: Communicator to launch IO on.
        time: The time stamp; ``None`` selects the first stored time step.
        backend_args: The backend arguments

    Returns:
        A tuple (topology, dofs) where topology contains the
        vertex indices of the cells, dofs the degrees of
        freedom within that cell.

    Raises:
        ValueError: If the variable name or requested time step is not in the file.
    """
    num_components = 1  # Default assumption, overridden by data read in having multiple components
    if comm.rank == 0:
        with netCDF4.Dataset(filename, "r") as infile:
            raw_names = infile.variables["name_elem_var"][:].data
            num_blocks = infile.dimensions["num_el_blk"].size

            node_names = netCDF4.chartostring(raw_names)
            if name not in node_names:
                raise ValueError(
                    f"Cell data with name {name} not found in file.",
                    f"Available variables: {node_names}",
                )
            # Exodus element-variable arrays are numbered from 1.
            index = np.flatnonzero(name == node_names)[0] + 1

            # One value array per element block for this variable.
            entity_blocks = [
                infile.variables[f"vals_elem_var{index}eb{i + 1}"] for i in range(num_blocks)
            ]
            time_steps = infile.variables["time_whole"][:].data
            if time is None:
                # Fix: previously the first time *value* (time_steps[0]) was
                # used as the index; default to index 0 (the first step).
                time_idx = 0
            else:
                time_indices = np.flatnonzero(np.isclose(time_steps, time))
                if len(time_indices) == 0:
                    raise ValueError(
                        f"Could not find {name}(t={time}), available time steps are {time_steps}"
                    )
                time_idx = time_indices[0]

            if len(entity_blocks) > 0:
                datasets = []
                for entity_block in entity_blocks:
                    datasets.append(entity_block[time_idx])

                dataset = np.hstack(datasets)

                if len(dataset.shape) == 1:
                    dataset = dataset.reshape(-1, num_components)
                else:
                    num_components = dataset.shape[1]
    # Broadcast num components to all other ranks
    num_components = comm.bcast(num_components, root=0)

    # Zero data on all other processes
    if comm.rank != 0:
        dataset = np.zeros((0, num_components), dtype=np.float64)
    _time = float(time) if time is not None else None

    topology = read_mesh_data(filename, comm, _time, False, backend_args=None).cells
    return topology, dataset
def read_function_names(
    filename: Path | str, comm: MPI.Intracomm, backend_args: dict[str, Any] | None
) -> list[str]:
    """Read all function names from a file.

    Both element (cell) and nodal (point) variable names are returned.

    Args:
        filename: Path to file
        comm: MPI communicator to launch IO on.
        backend_args: Arguments to backend

    Returns:
        A list of function names.
    """
    with netCDF4.Dataset(filename, "r") as infile:
        function_names: list[str] = []
        for key in ["name_elem_var", "name_nod_var"]:
            # Robustness fix: a file may hold only nodal or only element
            # variables; skip name tables that are absent instead of
            # raising KeyError.
            if key not in infile.variables:
                continue
            raw_names = infile.variables[key][:].data
            decoded_names = netCDF4.chartostring(raw_names)
            function_names.extend(decoded_names)
        return function_names
def write_data(
    filename: Path | str,
    array_data: ArrayData,
    comm: MPI.Intracomm,
    time: str | float | None,
    mode: FileMode,
    backend_args: dict[str, Any] | None,
):
    """Write a 2D-array to file (distributed across processes with MPI).

    Args:
        filename: Path to file
        array_data: Data to write to file
        comm: MPI communicator to open the file with
        time: Time-stamp for data.
        mode: Append or write
        backend_args: The backend arguments

    Raises:
        NotImplementedError: Always; the Exodus backend cannot write arrays.
    """
    raise NotImplementedError("Exodus has not implemented this yet")