# Copyright (C) 2024 Jørgen Schartum Dokken
#
# This file is part of io4dolfinx
#
# SPDX-License-Identifier: MIT

from __future__ import annotations

import typing
from pathlib import Path

from mpi4py import MPI

import dolfinx
import numpy as np

from . import compat
from .backends import FileMode, get_backend
from .comm_helpers import numpy_to_mpi
from .structures import FunctionData, MeshData
from .utils import (
    compute_insert_position,
    compute_local_range,
    index_owner,
    unroll_dofmap,
    unroll_insert_position,
)

__all__ = ["write_function_on_input_mesh", "write_mesh_input_order"]


def create_original_mesh_data(mesh: dolfinx.mesh.Mesh) -> MeshData:
    """
    Store mesh data locally on the output process, i.e. redistribute geometry and
    topology to the processes that own them in the original input ordering.
    """

    # 1. Send cell indices owned by the current process to the process that
    # owned them in the original input

    # Get the input cell index for cells owned by this process
    num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local
    original_cell_index = mesh.topology.original_cell_index[:num_owned_cells]

    # Compute the owner of each cell based on its original cell index
    num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global
    output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global)
    local_cell_range = compute_local_range(mesh.comm, num_cells_global)
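
    # A hedged illustration of the partitioning above (hypothetical values): with
    # num_cells_global = 10 on two ranks, compute_local_range is expected to give
    # the contiguous ranges [0, 5) on rank 0 and [5, 10) on rank 1, so index_owner
    # would map an original cell index of 7 to output rank 1.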

    # Compute outgoing edges from the current process to the output processes,
    # counting the number of cells sent to each process at the same time
    cell_destinations, _send_cells_per_proc = np.unique(output_cell_owner, return_counts=True)
    send_cells_per_proc = _send_cells_per_proc.astype(np.int32)
    del _send_cells_per_proc
    cell_to_output_comm = mesh.comm.Create_dist_graph(
        [mesh.comm.rank],
        [len(cell_destinations)],
        cell_destinations.tolist(),
        reorder=False,
    )
    cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors()
    assert np.allclose(cell_dests, cell_destinations)
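
    # Background on the pattern above: Create_dist_graph builds a neighbourhood
    # communicator with edges from this rank to the ranks in cell_destinations;
    # Get_dist_neighbors returns the incoming (cell_sources) and outgoing
    # (cell_dests) ranks, and with reorder=False the outgoing ranks should match
    # cell_destinations, which the assert verifies.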

    # Compute the number of received cells
    recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32)
    if len(send_cells_per_proc) == 0:
        send_cells_per_proc = np.zeros(1, dtype=np.int32)
    if len(recv_cells_per_proc) == 0:
        recv_cells_per_proc = np.zeros(1, dtype=np.int32)
    send_cells_per_proc = send_cells_per_proc.astype(np.int32)
    cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc)
    assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0]
    # Pack and send cell indices (used for mapping the topology dofmap later)
    cell_insert_position = compute_insert_position(
        output_cell_owner, cell_destinations, send_cells_per_proc
    )
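
    # Assumed semantics of compute_insert_position (hypothetical example): for each
    # owned cell it returns that cell's slot in the packed send buffer, where cells
    # are grouped by destination rank in the order given by cell_destinations.
    # E.g. with output_cell_owner = [1, 0, 1] and destinations [0, 1], the positions
    # would be [1, 0, 2]: the rank-0 cell first, then the two rank-1 cells.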
    send_cells = np.empty_like(cell_insert_position, dtype=np.int64)
    send_cells[cell_insert_position] = original_cell_index
    recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64)
    send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T]
    recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T]
    cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg)
    del send_cells_msg, recv_cells_msg, send_cells

    # Map received cells to the local index
    local_cell_index = recv_cells - local_cell_range[0]

    # 2. Create a dofmap based on the original geometry indices and re-order it to
    # match the original cell index order on the output process

    # Get the original node index for all nodes (including ghosts) and convert
    # the dofmap to these indices
    original_node_index = mesh.geometry.input_global_indices
    _, num_nodes_per_cell = mesh.geometry.dofmap.shape
    local_geometry_dofmap = mesh.geometry.dofmap[:num_owned_cells, :]
    global_geometry_dofmap = original_node_index[local_geometry_dofmap.reshape(-1)]

    # Unroll insert positions for the geometry dofmap
    dofmap_insert_position = unroll_insert_position(cell_insert_position, num_nodes_per_cell)
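
    # Assumed semantics of unroll_insert_position (hypothetical example): each cell
    # position is expanded to num_nodes_per_cell consecutive buffer slots, so a cell
    # at position 2 with 3 nodes per cell occupies slots [6, 7, 8] of the unrolled
    # send buffer.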

    # Create and communicate connectivity in original geometry indices
    send_geometry_dofmap = np.empty_like(dofmap_insert_position, dtype=np.int64)
    send_geometry_dofmap[dofmap_insert_position] = global_geometry_dofmap
    del global_geometry_dofmap
    send_sizes_dofmap = send_cells_per_proc * num_nodes_per_cell
    recv_sizes_dofmap = recv_cells_per_proc * num_nodes_per_cell
    recv_geometry_dofmap = np.empty(recv_sizes_dofmap.sum(), dtype=np.int64)
    send_geometry_dofmap_msg = [send_geometry_dofmap, send_sizes_dofmap, MPI.INT64_T]
    recv_geometry_dofmap_msg = [recv_geometry_dofmap, recv_sizes_dofmap, MPI.INT64_T]
    cell_to_output_comm.Neighbor_alltoallv(send_geometry_dofmap_msg, recv_geometry_dofmap_msg)
    del send_geometry_dofmap_msg, recv_geometry_dofmap_msg

    # Reshape the dofmap and sort it by original cell index
    recv_dofmap = recv_geometry_dofmap.reshape(-1, num_nodes_per_cell)
    sorted_recv_dofmap = np.empty_like(recv_dofmap)
    sorted_recv_dofmap[local_cell_index] = recv_dofmap
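
    # The assignment above is a scatter: row i of recv_dofmap belongs to the cell
    # with local (output) index local_cell_index[i], so indexing the left-hand side
    # with local_cell_index places every received row at its original-order slot.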

    # 3. Move geometry coordinates to the input process
    # Compute outgoing edges from the current process and create a neighbourhood
    # communicator, counting the number of outgoing nodes at the same time
    num_owned_nodes = mesh.geometry.index_map().size_local
    num_nodes_global = mesh.geometry.index_map().size_global
    output_node_owner = index_owner(
        mesh.comm, original_node_index[:num_owned_nodes], num_nodes_global
    )

    node_destinations, _send_nodes_per_proc = np.unique(output_node_owner, return_counts=True)
    send_nodes_per_proc = _send_nodes_per_proc.astype(np.int32)
    del _send_nodes_per_proc

    geometry_to_owner_comm = mesh.comm.Create_dist_graph(
        [mesh.comm.rank],
        [len(node_destinations)],
        node_destinations.tolist(),
        reorder=False,
    )

    node_sources, node_dests, _ = geometry_to_owner_comm.Get_dist_neighbors()
    assert np.allclose(node_dests, node_destinations)

    # Compute send node insert positions
    send_nodes_position = compute_insert_position(
        output_node_owner, node_destinations, send_nodes_per_proc
    )
    unrolled_nodes_position = unroll_insert_position(send_nodes_position, 3)

    send_coordinates = np.empty_like(unrolled_nodes_position, dtype=mesh.geometry.x.dtype)
    send_coordinates[unrolled_nodes_position] = mesh.geometry.x[:num_owned_nodes, :].reshape(-1)
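
    # The factor 3 above reflects that DOLFINx stores mesh.geometry.x with three
    # components per node regardless of the geometric dimension; the coordinates
    # are stripped to gdim after they have been received.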

    # Send and receive geometry sizes
    send_coordinate_sizes = (send_nodes_per_proc * 3).astype(np.int32)
    recv_coordinate_sizes = np.zeros_like(node_sources, dtype=np.int32)
    geometry_to_owner_comm.Neighbor_alltoall(send_coordinate_sizes, recv_coordinate_sizes)

    # Send node coordinates
    recv_coordinates = np.empty(recv_coordinate_sizes.sum(), dtype=mesh.geometry.x.dtype)
    mpi_type = numpy_to_mpi[recv_coordinates.dtype.type]
    send_coord_msg = [send_coordinates, send_coordinate_sizes, mpi_type]
    recv_coord_msg = [recv_coordinates, recv_coordinate_sizes, mpi_type]
    geometry_to_owner_comm.Neighbor_alltoallv(send_coord_msg, recv_coord_msg)
    del send_coord_msg, recv_coord_msg

    # Send the node ordering for reordering the coordinates on the output process
    send_nodes = np.empty(num_owned_nodes, dtype=np.int64)
    send_nodes[send_nodes_position] = original_node_index[:num_owned_nodes]

    recv_indices = np.empty(recv_coordinate_sizes.sum() // 3, dtype=np.int64)
    send_nodes_msg = [send_nodes, send_nodes_per_proc, MPI.INT64_T]
    recv_nodes_msg = [recv_indices, recv_coordinate_sizes // 3, MPI.INT64_T]
    geometry_to_owner_comm.Neighbor_alltoallv(send_nodes_msg, recv_nodes_msg)

    # Compute the local ordering of the received nodes
    local_node_range = compute_local_range(mesh.comm, num_nodes_global)
    recv_indices -= local_node_range[0]

    # Sort geometry based on the input index and strip to gdim
    gdim = mesh.geometry.dim
    recv_nodes = recv_coordinates.reshape(-1, 3)
    _geometry = np.empty(recv_nodes.shape, dtype=mesh.geometry.x.dtype)
    _geometry[recv_indices, :] = recv_nodes
    geometry = _geometry[:, :gdim].copy()
    del _geometry, recv_nodes

    assert local_node_range[1] - local_node_range[0] == geometry.shape[0]
    cmap = compat.cmap(mesh)

    cell_to_output_comm.Free()
    geometry_to_owner_comm.Free()

    # NOTE: We could in theory store partitioning information here, but it would not
    # work nicely, as one would then need to read this data rather than the XDMFFile.
    # NOTE: The local geometry type-hint skip is only required on DOLFINx < 0.10,
    # where a proper `dolfinx.mesh.Geometry` wrapper does not exist
    return MeshData(
        local_geometry=geometry,  # type: ignore[arg-type]
        local_geometry_pos=local_node_range,
        num_nodes_global=num_nodes_global,
        local_topology=sorted_recv_dofmap,
        local_topology_pos=local_cell_range,
        num_cells_global=num_cells_global,
        cell_type=mesh.topology.cell_name(),
        degree=cmap.degree,
        lagrange_variant=cmap.variant,
        store_partition=False,
        partition_processes=None,
        ownership_array=None,
        ownership_offset=None,
        partition_range=None,
        partition_global=None,
    )


def create_function_data_on_original_mesh(
    u: dolfinx.fem.Function, name: typing.Optional[str] = None
) -> FunctionData:
    """
    Create a data object to be saved with a checkpointing backend (such as ADIOS2)
    """
    mesh = u.function_space.mesh

    # Compute which cells owned by the current process should be sent to which
    # output process
    # FIXME: Cache this
    num_owned_cells = mesh.topology.index_map(mesh.topology.dim).size_local
    original_cell_index = mesh.topology.original_cell_index[:num_owned_cells]

    # Compute the owner of each cell based on its original cell index
    num_cells_global = mesh.topology.index_map(mesh.topology.dim).size_global
    output_cell_owner = index_owner(mesh.comm, original_cell_index, num_cells_global)
    local_cell_range = compute_local_range(mesh.comm, num_cells_global)

    # Compute outgoing edges from the current process to the output processes,
    # counting the number of cells sent to each process at the same time
    cell_destinations, _send_cells_per_proc = np.unique(output_cell_owner, return_counts=True)
    send_cells_per_proc = _send_cells_per_proc.astype(np.int32)
    del _send_cells_per_proc
    cell_to_output_comm = mesh.comm.Create_dist_graph(
        [mesh.comm.rank],
        [len(cell_destinations)],
        cell_destinations.tolist(),
        reorder=False,
    )
    cell_sources, cell_dests, _ = cell_to_output_comm.Get_dist_neighbors()
    assert np.allclose(cell_dests, cell_destinations)

    # Compute the number of received cells
    recv_cells_per_proc = np.zeros_like(cell_sources, dtype=np.int32)
    send_cells_per_proc = send_cells_per_proc.astype(np.int32)
    cell_to_output_comm.Neighbor_alltoall(send_cells_per_proc, recv_cells_per_proc)
    assert recv_cells_per_proc.sum() == local_cell_range[1] - local_cell_range[0]

    # Pack and send cell indices (used for mapping the topology dofmap later)
    cell_insert_position = compute_insert_position(
        output_cell_owner, cell_destinations, send_cells_per_proc
    )
    send_cells = np.empty_like(cell_insert_position, dtype=np.int64)
    send_cells[cell_insert_position] = original_cell_index
    recv_cells = np.empty(recv_cells_per_proc.sum(), dtype=np.int64)
    send_cells_msg = [send_cells, send_cells_per_proc, MPI.INT64_T]
    recv_cells_msg = [recv_cells, recv_cells_per_proc, MPI.INT64_T]
    cell_to_output_comm.Neighbor_alltoallv(send_cells_msg, recv_cells_msg)
    del send_cells_msg, recv_cells_msg

    # Map received cells to the local index
    local_cell_index = recv_cells - local_cell_range[0]

    # Pack and send cell permutation info
    mesh.topology.create_entity_permutations()
    cell_permutation_info = mesh.topology.get_cell_permutation_info()[:num_owned_cells]
    send_perm = np.empty_like(send_cells, dtype=np.uint32)
    send_perm[cell_insert_position] = cell_permutation_info
    recv_perm = np.empty_like(recv_cells, dtype=np.uint32)
    send_perm_msg = [send_perm, send_cells_per_proc, MPI.UINT32_T]
    recv_perm_msg = [recv_perm, recv_cells_per_proc, MPI.UINT32_T]
    cell_to_output_comm.Neighbor_alltoallv(send_perm_msg, recv_perm_msg)
    cell_permutation_info = np.empty_like(recv_perm)
    cell_permutation_info[local_cell_index] = recv_perm
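
    # The permutation integers describe how each cell's entities are oriented
    # relative to the reference ordering; storing them with the checkpoint is
    # assumed to let a reader undo DOLFINx's cell-local data reordering.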

    # 2. Extract function data (the array is unchanged, keeping global indices from
    # DOLFINx). The dofmap is moved by the original cell index, like the mesh
    # geometry dofmap
    dofmap = u.function_space.dofmap
    dmap = dofmap.list
    num_dofs_per_cell = dmap.shape[1]
    dofmap_bs = dofmap.bs
    index_map_bs = dofmap.index_map_bs

    # Unroll the dofmap for the block size
    unrolled_dofmap = unroll_dofmap(dofmap.list[:num_owned_cells, :], dofmap_bs)
    dmap_loc = (unrolled_dofmap // index_map_bs).reshape(-1)
    dmap_rem = (unrolled_dofmap % index_map_bs).reshape(-1)

    # Convert index-map indices to global indices
    imap_global = dofmap.index_map.local_to_global(dmap_loc)
    dofmap_global = (imap_global * index_map_bs + dmap_rem).reshape(unrolled_dofmap.shape)
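
    # A worked example of the block conversion above (hypothetical values): with
    # index_map_bs = 3, an unrolled entry 7 splits into block 7 // 3 = 2 with
    # remainder 7 % 3 = 1; if local_to_global maps block 2 to global block g,
    # the resulting global dof is 3 * g + 1.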
    # num_dofs_per_cell now counts the unrolled dofs per cell (including block size)
    num_dofs_per_cell = dofmap_global.shape[1]
    dofmap_insert_position = unroll_insert_position(cell_insert_position, num_dofs_per_cell)

    # Create and send the array for the global dofmap
    send_function_dofmap = np.empty(len(dofmap_insert_position), dtype=np.int64)
    send_function_dofmap[dofmap_insert_position] = dofmap_global.reshape(-1)
    send_sizes_dofmap = send_cells_per_proc * num_dofs_per_cell
    recv_size_dofmap = recv_cells_per_proc * num_dofs_per_cell
    recv_function_dofmap = np.empty(recv_size_dofmap.sum(), dtype=np.int64)
    cell_to_output_comm.Neighbor_alltoallv(
        [send_function_dofmap, send_sizes_dofmap, MPI.INT64_T],
        [recv_function_dofmap, recv_size_dofmap, MPI.INT64_T],
    )

    shaped_dofmap = recv_function_dofmap.reshape(
        local_cell_range[1] - local_cell_range[0], num_dofs_per_cell
    ).copy()
    _final_dofmap = np.empty_like(shaped_dofmap)
    _final_dofmap[local_cell_index] = shaped_dofmap
    final_dofmap = _final_dofmap.reshape(-1)

    # Get the offsets of the dofmap
    num_cells_local = local_cell_range[1] - local_cell_range[0]
    num_dofs_local_dmap = num_cells_local * num_dofs_per_cell
    dofmap_imap = dolfinx.common.IndexMap(mesh.comm, num_dofs_local_dmap)
    local_dofmap_offsets = np.arange(num_cells_local + 1, dtype=np.int64)
    local_dofmap_offsets[:] *= num_dofs_per_cell
    local_dofmap_offsets[:] += dofmap_imap.local_range[0]
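
    # A worked example of the offsets above (hypothetical values): with
    # num_cells_local = 2, num_dofs_per_cell = 3 and a local dofmap range starting
    # at 6, the offsets become [0, 1, 2] * 3 + 6 = [6, 9, 12].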

    num_dofs_local = dofmap.index_map.size_local * dofmap.index_map_bs
    num_dofs_global = dofmap.index_map.size_global * dofmap.index_map_bs
    local_range = np.asarray(dofmap.index_map.local_range, dtype=np.int64) * dofmap.index_map_bs
    func_name = name if name is not None else u.name
    cell_to_output_comm.Free()
    return FunctionData(
        cell_permutations=cell_permutation_info,
        local_cell_range=local_cell_range,
        num_cells_global=num_cells_global,
        dofmap_array=final_dofmap,
        dofmap_offsets=local_dofmap_offsets,
        values=u.x.array[:num_dofs_local].copy(),
        dof_range=local_range,
        num_dofs_global=num_dofs_global,
        dofmap_range=dofmap_imap.local_range,
        global_dofs_in_dofmap=dofmap_imap.size_global,
        name=func_name,
    )


def write_function_on_input_mesh(
    filename: Path | str,
    u: dolfinx.fem.Function,
    time: float = 0.0,
    name: typing.Optional[str] = None,
    mode: FileMode = FileMode.append,
    backend_args: dict[str, typing.Any] | None = None,
    backend: str = "adios2",
):
    """
    Write a function checkpoint (to be read with the input mesh).

    Note:
        Requires the backend to implement {py:func}`io4dolfinx.backends.write_function`.

    Args:
        filename: The filename to write to
        u: The function to checkpoint
        time: Time-stamp associated with the function at the current write step
        name: Name to save the function under. If None, the name of the function is used.
        mode: The mode to use (write or append)
        backend_args: Arguments to the backend
        backend: Choice of backend module
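
    Example:
        A minimal usage sketch (illustrative only; assumes the package re-exports
        these functions and that the mesh is written to the same file first)::

            from mpi4py import MPI

            import dolfinx
            import io4dolfinx

            mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
            V = dolfinx.fem.functionspace(mesh, ("Lagrange", 1))
            u = dolfinx.fem.Function(V, name="u")
            u.interpolate(lambda x: x[0])

            io4dolfinx.write_mesh_input_order("checkpoint.bp", mesh)
            io4dolfinx.write_function_on_input_mesh("checkpoint.bp", u, time=0.0)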
363 """
    mesh = u.function_space.mesh
    function_data = create_function_data_on_original_mesh(u, name)
    fname = Path(filename)

    backend_cls = get_backend(backend)
    backend_args = backend_cls.get_default_backend_args(backend_args)
    backend_cls.write_function(
        fname,
        mesh.comm,
        function_data,
        time=time,
        mode=mode,
        backend_args=backend_args,
    )


def write_mesh_input_order(
    filename: Path | str,
    mesh: dolfinx.mesh.Mesh,
    time: float = 0.0,
    mode: FileMode = FileMode.write,
    backend: str = "adios2",
    backend_args: dict[str, typing.Any] | None = None,
):
    """
    Write a mesh to a checkpoint file in the original input ordering.

    Note:
        Requires the backend to implement {py:func}`io4dolfinx.backends.write_mesh`.

    Args:
        filename: The filename to write to
        mesh: Mesh to checkpoint
        time: Time-stamp associated with the mesh at the current write step
        mode: The mode to use (write or append)
        backend: Choice of backend module
        backend_args: Arguments to the backend
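
    Example:
        A minimal usage sketch (illustrative only; assumes the package
        re-exports this function)::

            from mpi4py import MPI

            import dolfinx
            import io4dolfinx

            mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
            io4dolfinx.write_mesh_input_order("mesh_checkpoint.bp", mesh)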
402 """
    mesh_data = create_original_mesh_data(mesh)
    fname = Path(filename)

    backend_cls = get_backend(backend)
    backend_args = backend_cls.get_default_backend_args(backend_args)
    backend_cls.write_mesh(
        fname,
        mesh.comm,
        mesh_data,
        backend_args=backend_args,
        mode=mode,
        time=time,
    )